"""simple docstring"""
import string
def lowerCamelCase ( _UpperCamelCase : str ) -> None:
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
__UpperCAmelCase : List[Any] = """"""
for symbol in message:
if symbol in string.ascii_uppercase:
__UpperCAmelCase : List[Any] = string.ascii_uppercase.find(_UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = num - key
if num < 0:
__UpperCAmelCase : Optional[int] = num + len(string.ascii_uppercase )
__UpperCAmelCase : List[Any] = translated + string.ascii_uppercase[num]
else:
__UpperCAmelCase : str = translated + symbol
print(f'''Decryption using Key #{key}: {translated}''' )
def lowerCamelCase ( ) -> None:
'''simple docstring'''
__UpperCAmelCase : List[str] = input("""Encrypted message: """ )
__UpperCAmelCase : List[Any] = message.upper()
decrypt(_UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
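# A quick usage sketch (hedged example; the ciphertext below is hypothetical,
# not part of the original script): "URYYB" is "HELLO" shifted by 13, so among
# the 26 printed candidates the line for key 13 reads
#     Decryption using Key #13: HELLO
# e.g. decrypt("URYYB")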
---
"""simple docstring"""
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
UpperCAmelCase : List[Any] = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
UpperCAmelCase : List[Any] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F"{len(upper_files)} files contain uppercase characters:")
print('\n'.join(upper_files) + '\n')
UpperCAmelCase : str = [file for file in filepaths if ' ' in file]
if space_files:
print(F"{len(space_files)} files contain space characters:")
print('\n'.join(space_files) + '\n')
UpperCAmelCase : str = [file for file in filepaths if '-' in file]
if hyphen_files:
print(F"{len(hyphen_files)} files contain hyphen characters:")
print('\n'.join(hyphen_files) + '\n')
UpperCAmelCase : Dict = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F"{len(nodir_files)} files are not in a directory:")
print('\n'.join(nodir_files) + '\n')
UpperCAmelCase : Optional[int] = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
---
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
---
"""Chinese remainder theorem built on the extended Euclidean algorithm."""
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) such that a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Find x with x % n1 == r1 and x % n2 == r2, for coprime n1, n2."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on invert_modulo."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
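# Hand-checked sanity asserts (added as a sketch, not part of the original file;
# each value was verified by substitution):
# -1*10 + 2*6 == 2 == gcd(10, 6); 31 is the unique solution mod 35 of
# x % 5 == 1 and x % 7 == 3; and 2 * 4 % 7 == 1, so 4 inverts 2 modulo 7.
assert extended_euclid(10, 6) == (-1, 2)
assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert invert_modulo(2, 7) == 4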
---
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


TOLERANCE = 1e-4

if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder


@require_torch
class AutoformerModelTester:
    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()


def prepare_batch(filename="train-batch.pt"):
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    batch = torch.load(file, map_location=torch_device)
    return batch


@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
---
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
---
import argparse
import json
import os

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"])
            )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
---
import argparse
import json
import os

import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image

from transformers import (
    EfficientNetConfig,
    EfficientNetForImageClassification,
    EfficientNetImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
    "b0": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.0,
        "image_size": 224,
        "dropout_rate": 0.2,
        "dw_padding": [],
    },
    "b1": {
        "hidden_dim": 1280,
        "width_coef": 1.0,
        "depth_coef": 1.1,
        "image_size": 240,
        "dropout_rate": 0.2,
        "dw_padding": [16],
    },
    "b2": {
        "hidden_dim": 1408,
        "width_coef": 1.1,
        "depth_coef": 1.2,
        "image_size": 260,
        "dropout_rate": 0.3,
        "dw_padding": [5, 8, 16],
    },
    "b3": {
        "hidden_dim": 1536,
        "width_coef": 1.2,
        "depth_coef": 1.4,
        "image_size": 300,
        "dropout_rate": 0.3,
        "dw_padding": [5, 18],
    },
    "b4": {
        "hidden_dim": 1792,
        "width_coef": 1.4,
        "depth_coef": 1.8,
        "image_size": 380,
        "dropout_rate": 0.4,
        "dw_padding": [6],
    },
    "b5": {
        "hidden_dim": 2048,
        "width_coef": 1.6,
        "depth_coef": 2.2,
        "image_size": 456,
        "dropout_rate": 0.4,
        "dw_padding": [13, 27],
    },
    "b6": {
        "hidden_dim": 2304,
        "width_coef": 1.8,
        "depth_coef": 2.6,
        "image_size": 528,
        "dropout_rate": 0.5,
        "dw_padding": [31],
    },
    "b7": {
        "hidden_dim": 2560,
        "width_coef": 2.0,
        "depth_coef": 3.1,
        "image_size": 600,
        "dropout_rate": 0.5,
        "dw_padding": [18],
    },
}


def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor


def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))

    for b in block_names:
        hf_b = block_name_mapping[b]
        rename_keys.append((f"block{b}_expand_conv/kernel:0", f"encoder.blocks.{hf_b}.expansion.expand_conv.weight"))
        rename_keys.append((f"block{b}_expand_bn/gamma:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.weight"))
        rename_keys.append((f"block{b}_expand_bn/beta:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.bias"))
        rename_keys.append(
            (f"block{b}_expand_bn/moving_mean:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_expand_bn/moving_variance:0", f"encoder.blocks.{hf_b}.expansion.expand_bn.running_var")
        )
        rename_keys.append(
            (f"block{b}_dwconv/depthwise_kernel:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight")
        )
        rename_keys.append((f"block{b}_bn/gamma:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight"))
        rename_keys.append((f"block{b}_bn/beta:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias"))
        rename_keys.append(
            (f"block{b}_bn/moving_mean:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean")
        )
        rename_keys.append(
            (f"block{b}_bn/moving_variance:0", f"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var")
        )
        rename_keys.append((f"block{b}_se_reduce/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight"))
        rename_keys.append((f"block{b}_se_reduce/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias"))
        rename_keys.append((f"block{b}_se_expand/kernel:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.weight"))
        rename_keys.append((f"block{b}_se_expand/bias:0", f"encoder.blocks.{hf_b}.squeeze_excite.expand.bias"))
        rename_keys.append(
            (f"block{b}_project_conv/kernel:0", f"encoder.blocks.{hf_b}.projection.project_conv.weight")
        )
        rename_keys.append((f"block{b}_project_bn/gamma:0", f"encoder.blocks.{hf_b}.projection.project_bn.weight"))
        rename_keys.append((f"block{b}_project_bn/beta:0", f"encoder.blocks.{hf_b}.projection.project_bn.bias"))
        rename_keys.append(
            (f"block{b}_project_bn/moving_mean:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_mean")
        )
        rename_keys.append(
            (f"block{b}_project_bn/moving_variance:0", f"encoder.blocks.{hf_b}.projection.project_bn.running_var")
        )

    rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight"))
    rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight"))
    rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias"))
    rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean"))
    rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var"))

    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping


def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)


@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match  -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
---
__author__ = "Tobias Carryer"

from time import time


class LinearCongruentialGenerator:
    """A pseudorandom number generator based on the linear congruential method."""

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """Advance the generator state and return the next pseudorandom number."""
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
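# Deterministic check (a sketch with a fixed seed, not part of the original file):
# from seed 0 the first draw is (1664525 * 0 + 1013904223) % (2 << 31) == 1013904223.
_demo = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
assert _demo.next_number() == 1013904223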
---
import json
import os
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer (BPE-based; decode-only when no merges file is given)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")

            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
---
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    # Recursion bottoms out at 2x2, where the naive product is used.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # The seven Strassen products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
---
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
---
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def a (lowerCAmelCase__ ):
__a = git.Repo(search_parent_directories=lowerCAmelCase__ )
__a = {
"""repo_id""": str(lowerCAmelCase__ ),
"""repo_sha""": str(repo.head.object.hexsha ),
"""repo_branch""": str(repo.active_branch ),
}
with open(os.path.join(lowerCAmelCase__ , """git_log.json""" ) , """w""" ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=4 )
def a (lowerCAmelCase__ ):
if params.n_gpu <= 0:
__a = 0
__a = -1
__a = True
__a = False
return
assert torch.cuda.is_available()
logger.info("""Initializing GPUs""" )
if params.n_gpu > 1:
assert params.local_rank != -1
__a = int(os.environ["""WORLD_SIZE"""] )
__a = int(os.environ["""N_GPU_NODE"""] )
__a = int(os.environ["""RANK"""] )
# number of nodes / node ID
__a = params.world_size // params.n_gpu_per_node
__a = params.global_rank // params.n_gpu_per_node
__a = True
assert params.n_nodes == int(os.environ["""N_NODES"""] )
assert params.node_id == int(os.environ["""NODE_RANK"""] )
# local job (single GPU)
else:
assert params.local_rank == -1
__a = 1
__a = 0
__a = 0
__a = 0
__a = 1
__a = 1
__a = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
__a = params.node_id == 0 and params.local_rank == 0
__a = params.n_nodes > 1
# summary
__a = f'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def a (lowerCAmelCase__ ):
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
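# Hedged sketch (assumed example values, for illustration): the environment variables
# the multi-GPU branch above expects a distributed launcher to export.
_example_env = {
    "WORLD_SIZE": "8",  # total number of processes across all nodes
    "N_GPU_NODE": "4",  # GPUs per node
    "RANK": "5",        # global rank of this process
    "N_NODES": "2",     # number of nodes
    "NODE_RANK": "1",   # index of this node
}
assert int(_example_env["WORLD_SIZE"]) == int(_example_env["N_GPU_NODE"]) * int(_example_env["N_NODES"])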
| 99
|
"""simple docstring"""
from collections.abc import Callable
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
A__ = a
A__ = b
if function(lowerCAmelCase__ ) == 0: # a or b is already a root of the function
return a
elif function(lowerCAmelCase__ ) == 0:
return b
elif (
function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) > 0
): # if neither endpoint is a root and f(a) and f(b) share the same sign,
# then bisection cannot guarantee a root inside the interval
raise ValueError('could not find root in given interval.' )
else:
A__ = start + (end - start) / 2.0
while abs(start - mid ) > 10**-7: # until the interval width shrinks below 10^-7
if function(lowerCAmelCase__ ) == 0:
return mid
elif function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) < 0:
A__ = mid
else:
A__ = mid
A__ = start + (end - start) / 2.0
return mid
def __lowerCamelCase ( lowerCAmelCase__ ):
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
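# Hedged standalone check (illustrative, self-contained): the midpoint update used
# above converges to the real root of x**3 - 2*x - 5, which is approximately 2.0945515.
_lo, _hi = 1.0, 3.0
while _hi - _lo > 1e-7:
    _mid = (_lo + _hi) / 2.0
    if (_mid**3 - 2 * _mid - 5) * (_lo**3 - 2 * _lo - 5) < 0:
        _hi = _mid
    else:
        _lo = _mid
assert abs(_lo - 2.0945515) < 1e-5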
| 260
| 0
|
"""simple docstring"""
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
lowercase_ : Optional[Union[str, Path]] = None
lowercase_ : bool = False
lowercase_ : bool = False
lowercase_ : bool = False
lowercase_ : Optional[Dict] = None
lowercase_ : Optional[str] = None
lowercase_ : bool = False
lowercase_ : bool = False
lowercase_ : bool = False
lowercase_ : bool = True
lowercase_ : Optional[int] = None
lowercase_ : int = 1
lowercase_ : Optional[Union[str, bool]] = None
lowercase_ : bool = False
lowercase_ : Optional[Dict] = None
lowercase_ : Optional[str] = None
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self.__class__(**{k: copy.deepcopy(snake_case_ ) for k, v in self.__dict__.items()} )
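# Hedged sketch (illustrative, hypothetical field names): the copy method above returns
# a deep, independent clone, so mutating nested fields of the clone leaves the original intact.
import copy as _copy
_cfg = {"num_proc": 1, "features": {"col": "int64"}}
_clone = {k: _copy.deepcopy(v) for k, v in _cfg.items()}
_clone["features"]["col"] = "string"
assert _cfg["features"]["col"] == "int64"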
| 720
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCamelCase_ : Tuple = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
lowerCamelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
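# Hedged usage note (illustrative): with the _LazyModule pattern above, the heavy
# tokenizer module is only imported when the attribute is first accessed, e.g.
#   from transformers.models.bartpho import BartphoTokenizer
# triggers the real import; merely importing the parent package does not.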
| 302
| 0
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = "arrow" , **__UpperCAmelCase , ):
'''simple docstring'''
super().__init__(
split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , **__UpperCAmelCase , )
lowerCAmelCase__ :Dict = load_from_cache_file
lowerCAmelCase__ :int = file_format
lowerCAmelCase__ :int = Spark(
df=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , working_dir=__UpperCAmelCase , **__UpperCAmelCase , )
def snake_case ( self ):
'''simple docstring'''
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase__ :Dict = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 93
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 71
| 0
|
def snake_case__ ( UpperCAmelCase : str ):
lowerCAmelCase__ :Optional[Any] = 0
for ch in input_str:
lowerCAmelCase__ :int = ord(UpperCAmelCase )
lowerCAmelCase__ :List[Any] = pow(2 , UpperCAmelCase )
# If the bit for this character's Unicode code point is already set, we have a duplicate
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
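# Hedged standalone check (illustrative, self-contained): the bitmap trick above flags
# repeated characters, so "abc" passes while "aba" fails.
for _s, _expected in (("abc", True), ("aba", False)):
    _seen, _unique = 0, True
    for _ch in _s:
        _bit = 1 << ord(_ch)
        if _seen & _bit:
            _unique = False
            break
        _seen |= _bit
    assert _unique is _expected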
| 717
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class _UpperCAmelCase ( _A ):
"""simple docstring"""
A = ['''vqvae''']
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , mel=_lowerCAmelCase , vqvae=_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
return 50 if isinstance(self.scheduler , _lowerCAmelCase ) else 1_000
@torch.no_grad()
def __call__( self , _lowerCAmelCase = 1 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = 0 , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase=True , ):
'''simple docstring'''
lowerCAmelCase__ :str = steps or self.get_default_steps()
self.scheduler.set_timesteps(_lowerCAmelCase )
lowerCAmelCase__ :Dict = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
lowerCAmelCase__ :Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
lowerCAmelCase__ :Optional[int] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_lowerCAmelCase , device=self.device , )
lowerCAmelCase__ :Union[str, Any] = noise
lowerCAmelCase__ :Any = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_lowerCAmelCase , _lowerCAmelCase )
lowerCAmelCase__ :Dict = self.mel.audio_slice_to_image(_lowerCAmelCase )
lowerCAmelCase__ :List[str] = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
(input_image.height, input_image.width) )
lowerCAmelCase__ :Tuple = (input_image / 255) * 2 - 1
lowerCAmelCase__ :Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
lowerCAmelCase__ :str = self.vqvae.encode(torch.unsqueeze(_lowerCAmelCase , 0 ) ).latent_dist.sample(
generator=_lowerCAmelCase )[0]
lowerCAmelCase__ :Dict = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
lowerCAmelCase__ :Dict = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , self.scheduler.timesteps[start_step - 1] )
lowerCAmelCase__ :Optional[Any] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
lowerCAmelCase__ :Dict = int(mask_start_secs * pixels_per_second )
lowerCAmelCase__ :Tuple = int(mask_end_secs * pixels_per_second )
lowerCAmelCase__ :str = self.scheduler.add_noise(_lowerCAmelCase , _lowerCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _lowerCAmelCase ):
lowerCAmelCase__ :Optional[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["sample"]
else:
lowerCAmelCase__ :Dict = self.unet(_lowerCAmelCase , _lowerCAmelCase )["sample"]
if isinstance(self.scheduler , _lowerCAmelCase ):
lowerCAmelCase__ :Any = self.scheduler.step(
model_output=_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , eta=_lowerCAmelCase , generator=_lowerCAmelCase , )["prev_sample"]
else:
lowerCAmelCase__ :List[str] = self.scheduler.step(
model_output=_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , generator=_lowerCAmelCase , )["prev_sample"]
if mask is not None:
if mask_start > 0:
lowerCAmelCase__ :List[Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
lowerCAmelCase__ :Optional[Any] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
lowerCAmelCase__ :Any = 1 / self.vqvae.config.scaling_factor * images
lowerCAmelCase__ :List[Any] = self.vqvae.decode(_lowerCAmelCase )["sample"]
lowerCAmelCase__ :Dict = (images / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase__ :Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
lowerCAmelCase__ :Optional[int] = (images * 255).round().astype("uint8" )
lowerCAmelCase__ :Optional[int] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_lowerCAmelCase , mode="RGB" ).convert("L" ) for _ in images) )
lowerCAmelCase__ :Optional[Any] = [self.mel.image_to_audio(_lowerCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_lowerCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_lowerCAmelCase ) )
@torch.no_grad()
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = 50 ):
'''simple docstring'''
assert isinstance(self.scheduler , _lowerCAmelCase )
self.scheduler.set_timesteps(_lowerCAmelCase )
lowerCAmelCase__ :Any = np.array(
[np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
lowerCAmelCase__ :Dict = (sample / 255) * 2 - 1
lowerCAmelCase__ :Optional[Any] = torch.Tensor(_lowerCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
lowerCAmelCase__ :List[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
lowerCAmelCase__ :Any = self.scheduler.alphas_cumprod[t]
lowerCAmelCase__ :List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
lowerCAmelCase__ :List[str] = 1 - alpha_prod_t
lowerCAmelCase__ :List[Any] = self.unet(_lowerCAmelCase , _lowerCAmelCase )["sample"]
lowerCAmelCase__ :int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
lowerCAmelCase__ :List[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
lowerCAmelCase__ :Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def snake_case_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = acos(torch.dot(torch.flatten(_lowerCAmelCase ) , torch.flatten(_lowerCAmelCase ) ) / torch.norm(_lowerCAmelCase ) / torch.norm(_lowerCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_lowerCAmelCase ) + sin(alpha * theta ) * xa / sin(_lowerCAmelCase )
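# Hedged numeric check (illustrative, self-contained): the static method above is
# spherical linear interpolation (slerp); at alpha = 0 it reproduces the first input exactly.
import numpy as _np
_xa, _xb = _np.array([1.0, 0.0]), _np.array([0.0, 1.0])
_theta = _np.arccos(_np.dot(_xa, _xb) / (_np.linalg.norm(_xa) * _np.linalg.norm(_xb)))
_alpha = 0.0
_out = _np.sin((1 - _alpha) * _theta) * _xa / _np.sin(_theta) + _np.sin(_alpha * _theta) * _xb / _np.sin(_theta)
assert _np.allclose(_out, _xa)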
| 111
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
def a_ ( _UpperCAmelCase : str ) -> bytes:
__snake_case : Tuple = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
__snake_case : Any = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(_UpperCAmelCase ).content
if __name__ == "__main__":
A__ : Tuple = input('''Enter Video/IGTV url: ''').strip()
A__ : Optional[int] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
with open(file_name, '''wb''') as fp:
fp.write(download_video(url))
print(F"""Done. Video saved to disk as {file_name}.""")
| 286
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = DebertaTokenizer
A__ = True
A__ = DebertaTokenizerFast
def A_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__snake_case : Union[str, Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
__snake_case : List[str] = dict(zip(__a , range(len(__a ) ) ) )
__snake_case : int = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
__snake_case : Any = {'unk_token': '[UNK]'}
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__snake_case : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__a ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__a ) )
def A_ ( self : Optional[int] , **__a : List[Any] ) -> Optional[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__a )
def A_ ( self : Optional[int] , __a : Dict ) -> List[str]:
'''simple docstring'''
__snake_case : Union[str, Any] = 'lower newer'
__snake_case : List[Any] = 'lower newer'
return input_text, output_text
def A_ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
__snake_case : str = self.get_tokenizer()
__snake_case : List[str] = 'lower newer'
__snake_case : Tuple = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
__snake_case : Optional[int] = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__snake_case : str = tokens + [tokenizer.unk_token]
__snake_case : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
def A_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
__snake_case : List[str] = self.get_tokenizer()
__snake_case : List[str] = tokenizer('Hello' , 'World' )
__snake_case : int = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , __a )
@slow
def A_ ( self : Dict ) -> str:
'''simple docstring'''
__snake_case : str = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
__snake_case : str = tokenizer.encode('sequence builders' , add_special_tokens=__a )
__snake_case : Any = tokenizer.encode('multi-sequence build' , add_special_tokens=__a )
__snake_case : List[Any] = tokenizer.encode(
'sequence builders' , add_special_tokens=__a , add_prefix_space=__a )
__snake_case : Union[str, Any] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=__a , add_prefix_space=__a )
__snake_case : int = tokenizer.build_inputs_with_special_tokens(__a )
__snake_case : Tuple = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def A_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__snake_case : int = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__snake_case : List[Any] = tokenizer_class.from_pretrained('microsoft/deberta-base' )
__snake_case : int = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
__snake_case : Union[str, Any] = tokenizer(__a , padding=__a )
__snake_case : List[str] = [tokenizer.decode(__a , skip_special_tokens=__a ) for seq in encoding['input_ids']]
# fmt: off
__snake_case : Optional[Any] = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__snake_case : int = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , __a )
for expected, decoded in zip(__a , __a ):
self.assertEqual(__a , __a )
| 286
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={
"""google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""",
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : int = '''canine'''
def __init__( self : Union[str, Any] , UpperCAmelCase__ : int=7_6_8 , UpperCAmelCase__ : Dict=1_2 , UpperCAmelCase__ : Optional[int]=1_2 , UpperCAmelCase__ : List[str]=3_0_7_2 , UpperCAmelCase__ : Any="gelu" , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Union[str, Any]=1_6_3_8_4 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : Dict=1E-12 , UpperCAmelCase__ : List[Any]=0 , UpperCAmelCase__ : List[str]=0XE_0_0_0 , UpperCAmelCase__ : Union[str, Any]=0XE_0_0_1 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : Union[str, Any]=4 , UpperCAmelCase__ : List[Any]=8 , UpperCAmelCase__ : List[str]=1_6_3_8_4 , UpperCAmelCase__ : Union[str, Any]=1_2_8 , **UpperCAmelCase__ : Dict , ) -> Tuple:
super().__init__(pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = type_vocab_size
lowerCAmelCase = layer_norm_eps
# Character config:
lowerCAmelCase = downsampling_rate
lowerCAmelCase = upsampling_kernel_size
lowerCAmelCase = num_hash_functions
lowerCAmelCase = num_hash_buckets
lowerCAmelCase = local_transformer_stride
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case ={
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =[
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 513
| 0
|
from __future__ import annotations
from math import pi
def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> dict[str, float]:
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if inductance < 0:
raise ValueError("""Inductance cannot be negative""" )
if frequency < 0:
raise ValueError("""Frequency cannot be negative""" )
if reactance < 0:
raise ValueError("""Inductive reactance cannot be negative""" )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
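# Hedged numeric check (illustrative, assumed component values): for L = 10 mH at
# f = 1 kHz, the "reactance" branch above gives X_L = 2 * pi * f * L ~= 62.832 ohm.
from math import pi as _pi
assert abs(2 * _pi * 1000 * 0.010 - 62.832) < 1e-2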
| 79
|
class UpperCAmelCase_ :
def __init__( self , _lowerCAmelCase ):
# split the comma-separated input string into a list of number strings
UpperCAmelCase__ : Dict = arr.split(""",""" )
def __UpperCAmelCase ( self ):
UpperCAmelCase__ : Tuple = [int(self.array[0] )] * len(self.array )
UpperCAmelCase__ : List[str] = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
UpperCAmelCase__ : Tuple = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
UpperCAmelCase__ : Union[str, Any] = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple = input("""please input some numbers:""")
SCREAMING_SNAKE_CASE__ : Dict = SubArray(whole_array)
SCREAMING_SNAKE_CASE__ : Dict = array.solve_sub_array()
print(("""the results is:""", re))
| 79
| 1
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class a ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = FlaxAutoencoderKL
@property
def __magic_name__ ( self : str ):
'''simple docstring'''
snake_case__ : Any = 4
snake_case__ : Optional[Any] = 3
snake_case__ : Optional[int] = (3_2, 3_2)
snake_case__ : Optional[int] = jax.random.PRNGKey(0 )
snake_case__ : Union[str, Any] = jax.random.uniform(snake_case_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
snake_case__ : Any = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
snake_case__ : Dict = self.dummy_input
return init_dict, inputs_dict
| 502
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class a ( SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case_ , '''width_multiplier''' ) )
class a :
"""simple docstring"""
def __init__( self : List[str] , snake_case_ : Optional[int] , snake_case_ : Dict=1_3 , snake_case_ : Any=6_4 , snake_case_ : Dict=2 , snake_case_ : Optional[int]=3 , snake_case_ : str="swish" , snake_case_ : str=3 , snake_case_ : Union[str, Any]=3_2 , snake_case_ : Optional[Any]=0.1 , snake_case_ : Any=0.0_2 , snake_case_ : int=True , snake_case_ : Tuple=True , snake_case_ : Dict=1_0 , snake_case_ : Optional[int]=None , snake_case_ : str=0.2_5 , snake_case_ : List[Any]=0.0 , snake_case_ : Optional[Any]=0.0 , ):
'''simple docstring'''
snake_case__ : List[Any] = parent
snake_case__ : Dict = batch_size
snake_case__ : Dict = image_size
snake_case__ : Tuple = patch_size
snake_case__ : Tuple = num_channels
snake_case__ : Tuple = make_divisible(5_1_2 * width_multiplier , divisor=8 )
snake_case__ : Optional[int] = hidden_act
snake_case__ : int = conv_kernel_size
snake_case__ : Optional[int] = output_stride
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = use_labels
snake_case__ : Optional[Any] = is_training
snake_case__ : int = num_labels
snake_case__ : str = initializer_range
snake_case__ : Dict = scope
snake_case__ : Tuple = width_multiplier
snake_case__ : Optional[Any] = ffn_dropout
snake_case__ : Dict = attn_dropout
def __magic_name__ ( self : str ):
'''simple docstring'''
snake_case__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : List[str] = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Optional[int] = self.get_config()
return config, pixel_values, labels, pixel_labels
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __magic_name__ ( self : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ):
'''simple docstring'''
snake_case__ : Optional[Any] = MobileViTVaModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Dict = model(snake_case_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] ):
'''simple docstring'''
snake_case__ : Optional[int] = self.num_labels
snake_case__ : Optional[Any] = MobileViTVaForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : int = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : int , snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : Any , snake_case_ : List[str] ):
'''simple docstring'''
snake_case__ : Dict = self.num_labels
snake_case__ : Any = MobileViTVaForSemanticSegmentation(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : str = model(snake_case_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case__ : Optional[int] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __magic_name__ ( self : str ):
'''simple docstring'''
snake_case__ : Dict = self.prepare_config_and_inputs()
snake_case__ , snake_case__ , snake_case__ , snake_case__ : List[str] = config_and_inputs
snake_case__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : str = MobileViTVaModelTester(self )
snake_case__ : Union[str, Any] = MobileViTVaConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __magic_name__ ( self : int ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __magic_name__ ( self : int ):
'''simple docstring'''
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(snake_case_ )
snake_case__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Any = [*signature.parameters.keys()]
snake_case__ : Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
def check_hidden_states_output(snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Tuple ):
snake_case__ : Optional[Any] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
snake_case__ : Tuple = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : Union[str, Any] = outputs.hidden_states
snake_case__ : Any = 5
self.assertEqual(len(snake_case_ ) , snake_case_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
snake_case__ : Dict = 2
for i in range(len(snake_case_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case__ , snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : int = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case_ )
@slow
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Any = MobileViTVaModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _a ( ):
"""simple docstring"""
snake_case__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : Tuple = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
snake_case_ )
snake_case__ : Any = self.default_image_processor
snake_case__ : Tuple = prepare_img()
snake_case__ : str = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
snake_case__ : Any = model(**snake_case_ )
# verify the logits
snake_case__ : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
snake_case__ : Tuple = torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
snake_case__ : List[Any] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ : Any = model.to(snake_case_ )
snake_case__ : Tuple = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ : str = prepare_img()
snake_case__ : Union[str, Any] = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**snake_case_ )
snake_case__ : Tuple = outputs.logits
# verify the logits
snake_case__ : Optional[Any] = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , snake_case_ )
snake_case__ : List[Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=snake_case_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case_ , atol=1e-4 ) )
@slow
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ : List[str] = model.to(snake_case_ )
snake_case__ : Optional[int] = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
snake_case__ : str = prepare_img()
snake_case__ : str = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
snake_case__ : Any = model(**snake_case_ )
snake_case__ : str = outputs.logits.detach().cpu()
snake_case__ : int = image_processor.post_process_semantic_segmentation(outputs=snake_case_ , target_sizes=[(5_0, 6_0)] )
snake_case__ : int = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , snake_case_ )
snake_case__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case_ )
snake_case__ : Any = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , snake_case_ )
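# Hedged sketch (assumption: mirrors the usual make_divisible helper referenced above,
# not copied from the library): channel counts are rounded to the nearest multiple of
# `divisor` without dropping below 90% of the original value.
def _make_divisible_sketch(v, divisor=8):
    new_v = max(divisor, int(v + divisor / 2) // divisor * divisor)
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
assert _make_divisible_sketch(512 * 0.25) == 128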
| 502
| 1
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def UpperCamelCase_ ( ) -> Optional[Any]:
a__, a__ : Tuple = 9, 14 # noqa: F841
a__ : int = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
a__ : str = defaultdict(__a )
for nodea, nodea, cost in edges:
adjacency[nodea].append([nodea, cost] )
adjacency[nodea].append([nodea, cost] )
a__ : Union[str, Any] = mst(__a )
a__ : str = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
a__ : List[str] = tuple(answer[:2] )
a__ : Any = tuple(edge[::-1] )
assert edge in result or reverse in result
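# Hedged sanity check (illustrative): the expected MST edges above total
# 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37, the known minimum weight for this classic graph.
assert sum(_e[2] for _e in [[7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9]]) == 37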
| 37
|
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class _a ( UpperCamelCase__ ):
def __init__( self: int , *UpperCamelCase_: str , UpperCamelCase_: List[str]=None , UpperCamelCase_: int=None , **UpperCamelCase_: Optional[Any] ) -> List[str]:
"""simple docstring"""
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = eval_examples
lowercase__ = post_process_function
def lowerCamelCase_ ( self: List[str] , UpperCamelCase_: Optional[Dataset] = None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[List[str]] = None , UpperCamelCase_: str = "eval" , **UpperCamelCase_: int , ) -> Dict[str, float]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = (
gen_kwargs['''max_length'''] if gen_kwargs.get('''max_length''' ) is not None else self.args.generation_max_length
)
lowercase__ = (
gen_kwargs['''num_beams'''] if gen_kwargs.get('''num_beams''' ) is not None else self.args.generation_num_beams
)
lowercase__ = gen_kwargs
lowercase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowercase__ = self.get_eval_dataloader(UpperCamelCase_ )
lowercase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation; we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
else:
lowercase__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase_ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowercase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ )
return metrics
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: List[str]=None , UpperCamelCase_: str = "test" , **UpperCamelCase_: Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = gen_kwargs.copy()
lowercase__ = self.get_test_dataloader(UpperCamelCase_ )
# Temporarily disable metric computation; we will do it in the loop here.
lowercase__ = self.compute_metrics
lowercase__ = None
lowercase__ = time.time()
lowercase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowercase__ = eval_loop(
UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , metric_key_prefix=UpperCamelCase_ , )
finally:
lowercase__ = compute_metrics
lowercase__ = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
UpperCamelCase_ , UpperCamelCase_ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowercase__ = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , '''predict''' )
lowercase__ = self.compute_metrics(UpperCamelCase_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
lowercase__ = metrics.pop(UpperCamelCase_ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ )
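# Hedged standalone check (illustrative, hypothetical metric names): the key-renaming
# loop above ensures every metric carries the `eval_`/`test_` prefix expected downstream.
_metrics = {"bleu": 30.0, "eval_loss": 1.2}
for _key in list(_metrics.keys()):
    if not _key.startswith("eval_"):
        _metrics[f"eval_{_key}"] = _metrics.pop(_key)
assert set(_metrics) == {"eval_bleu", "eval_loss"}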
| 43
| 0
|
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE__ ) ->List[str]:
_UpperCAmelCase =split_dict._to_yaml_list()
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_UpperCAmelCase =SplitDict._from_yaml_list(_lowerCamelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_UpperCAmelCase =None
# the split name of split_dict takes over the name of the split info object
_UpperCAmelCase =split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=_lowerCamelCase ), SplitInfo(dataset_name="my_dataset" )] )
def lowerCamelCase__ ( SCREAMING_SNAKE_CASE__ ) ->Any:
# For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
# field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
_UpperCAmelCase =asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =1
_UpperCAmelCase =3
_UpperCAmelCase =(32, 32)
_UpperCAmelCase =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_snake_case )
return image
@property
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
_UpperCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=_snake_case , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
_UpperCAmelCase =AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
_UpperCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
return CLIPTextModel(_snake_case )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.dummy_cond_unet_upscale
_UpperCAmelCase =DDPMScheduler()
_UpperCAmelCase =DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase =self.dummy_vae
_UpperCAmelCase =self.dummy_text_encoder
_UpperCAmelCase =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase =Image.fromarray(np.uinta(_snake_case ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase =StableDiffusionUpscalePipeline(
unet=_snake_case , low_res_scheduler=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , max_noise_level=350 , )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase ="A painting of a squirrel eating a burger"
_UpperCAmelCase =torch.Generator(device=_snake_case ).manual_seed(0 )
_UpperCAmelCase =sd_pipe(
[prompt] , image=_snake_case , generator=_snake_case , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase =output.images
_UpperCAmelCase =torch.Generator(device=_snake_case ).manual_seed(0 )
_UpperCAmelCase =sd_pipe(
[prompt] , image=_snake_case , generator=_snake_case , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=_snake_case , )[0]
_UpperCAmelCase =image[0, -3:, -3:, -1]
_UpperCAmelCase =image_from_tuple[0, -3:, -3:, -1]
_UpperCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_UpperCAmelCase =np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.dummy_cond_unet_upscale
_UpperCAmelCase =DDPMScheduler()
_UpperCAmelCase =DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase =self.dummy_vae
_UpperCAmelCase =self.dummy_text_encoder
_UpperCAmelCase =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase =Image.fromarray(np.uinta(_snake_case ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_UpperCAmelCase =StableDiffusionUpscalePipeline(
unet=_snake_case , low_res_scheduler=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , max_noise_level=350 , )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase ="A painting of a squirrel eating a burger"
_UpperCAmelCase =sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase =output.images
assert image.shape[0] == 2
_UpperCAmelCase =torch.Generator(device=_snake_case ).manual_seed(0 )
_UpperCAmelCase =sd_pipe(
[prompt] , image=_snake_case , generator=_snake_case , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =self.dummy_cond_unet_upscale
_UpperCAmelCase =DDPMScheduler()
_UpperCAmelCase =DDIMScheduler(prediction_type="v_prediction" )
_UpperCAmelCase =self.dummy_vae
_UpperCAmelCase =self.dummy_text_encoder
_UpperCAmelCase =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase =self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_UpperCAmelCase =Image.fromarray(np.uinta(_snake_case ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_UpperCAmelCase =unet.half()
_UpperCAmelCase =text_encoder.half()
# make sure here that pndm scheduler skips prk
_UpperCAmelCase =StableDiffusionUpscalePipeline(
unet=_snake_case , low_res_scheduler=_snake_case , scheduler=_snake_case , vae=_snake_case , text_encoder=_snake_case , tokenizer=_snake_case , max_noise_level=350 , )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase ="A painting of a squirrel eating a burger"
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =sd_pipe(
[prompt] , image=_snake_case , generator=_snake_case , num_inference_steps=2 , output_type="np" , ).images
_UpperCAmelCase =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_UpperCAmelCase ="stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_UpperCAmelCase ="a cat sitting on a park bench"
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =pipe(
prompt=_snake_case , image=_snake_case , generator=_snake_case , output_type="np" , )
_UpperCAmelCase =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase =load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_UpperCAmelCase ="stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
_snake_case , torch_dtype=torch.floataa , )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_UpperCAmelCase ="a cat sitting on a park bench"
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =pipe(
prompt=_snake_case , image=_snake_case , generator=_snake_case , output_type="np" , )
_UpperCAmelCase =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def SCREAMING_SNAKE_CASE ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase =load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_UpperCAmelCase ="stabilityai/stable-diffusion-x4-upscaler"
_UpperCAmelCase =StableDiffusionUpscalePipeline.from_pretrained(
_snake_case , torch_dtype=torch.floataa , )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase ="a cat sitting on a park bench"
_UpperCAmelCase =torch.manual_seed(0 )
_UpperCAmelCase =pipe(
prompt=_snake_case , image=_snake_case , generator=_snake_case , num_inference_steps=5 , output_type="np" , )
_UpperCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
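# Hedged usage sketch (illustrative addition, not part of the test class above):
# the memory ceiling asserted in the last test comes from combining fp16 weights,
# attention slicing and sequential CPU offload. A minimal sketch, assuming the
# same public checkpoint; with sequential offload enabled, no explicit .to("cuda")
# call is needed.
def _low_memory_upscale_example(low_res_image):
    import torch
    from diffusers import StableDiffusionUpscalePipeline
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing(1)  # compute attention one slice at a time
    pipe.enable_sequential_cpu_offload()  # move submodules to GPU only while used
    return pipe("a cat sitting on a park bench", image=low_res_image, output_type="np").images[0]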
| 592
| 0
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : List[str] = 0
def __A ( self ):
_lowerCAmelCase : Any = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(a__ , a__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Tuple = Path(a__ ) / """preprocessor_config.json"""
_lowerCAmelCase : List[Any] = Path(a__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(a__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(a__ , """w""" ) )
_lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def __A ( self ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Union[str, Any] = Path(a__ ) / """preprocessor_config.json"""
_lowerCAmelCase : str = Path(a__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(a__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(a__ , """w""" ) )
_lowerCAmelCase : Tuple = AutoImageProcessor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : str = CLIPConfig()
            # Create a dummy config file with image_processor_type
_lowerCAmelCase : List[str] = Path(a__ ) / """preprocessor_config.json"""
_lowerCAmelCase : Union[str, Any] = Path(a__ ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(a__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(a__ , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_lowerCAmelCase : str = AutoImageProcessor.from_pretrained(a__ ).to_dict()
config_dict.pop("""image_processor_type""" )
_lowerCAmelCase : Dict = CLIPImageProcessor(**a__ )
# save in new folder
model_config.save_pretrained(a__ )
config.save_pretrained(a__ )
_lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained(a__ )
# make sure private variable is not incorrectly saved
_lowerCAmelCase : List[Any] = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(a__ , a__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Optional[Any] = Path(a__ ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(a__ , """w""" ) , )
_lowerCAmelCase : List[str] = AutoImageProcessor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def __A ( self ):
with self.assertRaisesRegex(
a__ , """clip-base is not a local folder and is not a valid model identifier""" ):
_lowerCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained("""clip-base""" )
def __A ( self ):
with self.assertRaisesRegex(
a__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
_lowerCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(a__ , revision="""aaaaaa""" )
def __A ( self ):
with self.assertRaisesRegex(
a__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
_lowerCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def __A ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(a__ ):
_lowerCAmelCase : List[str] = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a__ ):
_lowerCAmelCase : List[str] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=a__ )
_lowerCAmelCase : int = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=a__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a__ )
_lowerCAmelCase : int = AutoImageProcessor.from_pretrained(a__ , trust_remote_code=a__ )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def __A ( self ):
try:
AutoConfig.register("""custom""" , a__ )
AutoImageProcessor.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
AutoImageProcessor.register(a__ , a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCAmelCase : Optional[Any] = Path(a__ ) / """preprocessor_config.json"""
_lowerCAmelCase : List[Any] = Path(a__ ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(a__ , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(a__ , """w""" ) )
_lowerCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(a__ )
_lowerCAmelCase : str = AutoImageProcessor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __A ( self ):
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = True
try:
AutoConfig.register("""custom""" , a__ )
AutoImageProcessor.register(a__ , a__ )
# If remote code is not set, the default is to use local
_lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=a__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_lowerCAmelCase : Optional[Any] = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=a__ )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(a__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
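# Hedged sketch (illustrative addition, not executed by the suite above): the
# registration flow the last two tests exercise, using the same CustomConfig and
# CustomImageProcessor helpers imported at the top of this file.
def _register_custom_image_processor_example():
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    # From here on, AutoImageProcessor.from_pretrained(...) resolves any checkpoint
    # whose config reports model_type == "custom" to CustomImageProcessor.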
| 213
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
_UpperCamelCase : int = "ssube/stable-diffusion-x4-upscaler-onnx"
def __A ( self , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 128, 128) , rng=random.Random(a__ ) )
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
_lowerCAmelCase : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[Any] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Dict = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : str = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__ ).images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = self.get_dummy_inputs()
_lowerCAmelCase : str = pipe(**a__ ).images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[str] = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def __A ( self ):
_lowerCAmelCase : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = self.get_dummy_inputs()
_lowerCAmelCase : Any = pipe(**a__ ).images
_lowerCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):
@property
def __A ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __A ( self ):
_lowerCAmelCase : str = ort.SessionOptions()
_lowerCAmelCase : Tuple = False
return options
def __A ( self ):
_lowerCAmelCase : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase : Any = init_image.resize((128, 128) )
# using the PNDM scheduler by default
_lowerCAmelCase : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Optional[int] = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase : List[str] = torch.manual_seed(0 )
_lowerCAmelCase : Dict = pipe(
prompt=a__ , image=a__ , guidance_scale=7.5 , num_inference_steps=10 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[str] = output.images
_lowerCAmelCase : Tuple = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Dict = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def __A ( self ):
_lowerCAmelCase : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
_lowerCAmelCase : Union[str, Any] = init_image.resize((128, 128) )
_lowerCAmelCase : int = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
_lowerCAmelCase : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=a__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : List[Any] = """A fantasy landscape, trending on artstation"""
_lowerCAmelCase : List[Any] = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(
prompt=a__ , image=a__ , guidance_scale=7.5 , num_inference_steps=20 , generator=a__ , output_type="""np""" , )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Optional[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
_lowerCAmelCase : Optional[Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
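# Hedged usage sketch (illustrative addition): each CPU test above swaps the
# scheduler by rebuilding it from the pipeline's existing scheduler config; in
# plain form the pattern is:
#     pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#         "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
#     )
#     pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)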
| 213
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__magic_name__ : Optional[Any] = logging.get_logger(__name__)
__magic_name__ : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ : str = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
            '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ : Any = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
__magic_name__ : Any = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class A__ ( __snake_case ):
'''simple docstring'''
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = RealmTokenizer
def __init__( self : str , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Optional[Any]=True , _SCREAMING_SNAKE_CASE : Tuple="[UNK]" , _SCREAMING_SNAKE_CASE : Optional[int]="[SEP]" , _SCREAMING_SNAKE_CASE : Dict="[PAD]" , _SCREAMING_SNAKE_CASE : Any="[CLS]" , _SCREAMING_SNAKE_CASE : int="[MASK]" , _SCREAMING_SNAKE_CASE : int=True , _SCREAMING_SNAKE_CASE : List[str]=None , **_SCREAMING_SNAKE_CASE : int , ):
"""simple docstring"""
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('strip_accents' , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('type' ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = do_lower_case
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
        kwargs['padding'] = PaddingStrategy.MAX_LENGTH
UpperCamelCase = text
UpperCamelCase = kwargs.pop('text_pair' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = kwargs.pop('return_tensors' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(_SCREAMING_SNAKE_CASE ):
if batch_text_pair is not None:
UpperCamelCase = batch_text_pair[idx]
else:
UpperCamelCase = None
UpperCamelCase = super().__call__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = encoded_candidates.get('input_ids' )
UpperCamelCase = encoded_candidates.get('attention_mask' )
UpperCamelCase = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_SCREAMING_SNAKE_CASE )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_SCREAMING_SNAKE_CASE )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = {key: item for key, item in output_data.items() if len(_SCREAMING_SNAKE_CASE ) != 0}
return BatchEncoding(_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self : int , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
UpperCamelCase = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
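# Hedged usage sketch (illustrative addition). In the upstream transformers source
# this class is RealmTokenizerFast and the padded-batch method above is
# batch_encode_candidates; those original names are assumed here.
def _batch_encode_candidates_example():
    from transformers import RealmTokenizerFast
    tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
    # One list of candidate passages per example; every candidate is padded to
    # max_length, so the result stacks to (batch_size, num_candidates, max_length).
    batch_text = [["Hello world!", "Nice to meet you!"], ["The cute cat.", "The adorable dog."]]
    encoding = tokenizer.batch_encode_candidates(batch_text, max_length=10, return_tensors="pt")
    return encoding["input_ids"].shape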
| 705
|
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]
def create_vector(end_pointa: Pointad, end_pointb: Pointad) -> Vectorad:
    """simple docstring"""
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    """simple docstring"""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    """simple docstring"""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(pointa: Pointad, pointb: Pointad, pointc: Pointad, accuracy: int = 10) -> bool:
    """simple docstring"""
    ab = create_vector(pointa, pointb)
    ac = create_vector(pointa, pointc)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
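# Minimal usage sketch (illustrative addition): three points are collinear exactly
# when the cross product AB x AC is the zero vector, which is what the helpers
# above compute.
if __name__ == "__main__":
    # (0, 0, 0), (1, 1, 1) and (2, 2, 2) lie on one line, so AB x AC == (0, 0, 0).
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
    # (1, 2, 3) is off that line, so the cross product is non-zero.
    print(are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3)))  # False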
| 410
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ : Dict = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ : str = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
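# Hedged note (illustrative addition): with the _LazyModule registration above,
# top-level attribute access triggers the real import only on first use, e.g.
#     from transformers import TimeSeriesTransformerConfig
#     config = TimeSeriesTransformerConfig(prediction_length=24)
# loads configuration_time_series_transformer at that point, not at package import.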
| 105
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__a: Tuple = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__a: Dict = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Optional[int] = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=UpperCAmelCase )[0]
@deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __UpperCamelCase ( UpperCAmelCase ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream:
lowercase__ : int = _readaa(UpperCAmelCase )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
lowercase__ : Optional[Any] = _readaa(UpperCAmelCase )
lowercase__ : List[Any] = _readaa(UpperCAmelCase )
lowercase__ : Any = _readaa(UpperCAmelCase )
lowercase__ : Tuple = bytestream.read(rows * cols * num_images )
lowercase__ : Union[str, Any] = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta )
lowercase__ : int = data.reshape(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 1 )
return data
@deprecated(UpperCAmelCase , '''Please use tf.one_hot on tensors.''' )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase ):
lowercase__ : int = labels_dense.shape[0]
lowercase__ : List[Any] = numpy.arange(UpperCAmelCase ) * num_classes
lowercase__ : str = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
@deprecated(UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=10 ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase ) as bytestream:
lowercase__ : Tuple = _readaa(UpperCAmelCase )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
lowercase__ : int = _readaa(UpperCAmelCase )
lowercase__ : Union[str, Any] = bytestream.read(UpperCAmelCase )
lowercase__ : str = numpy.frombuffer(UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCAmelCase , UpperCAmelCase )
return labels
class UpperCAmelCase :
'''simple docstring'''
@deprecated(
__lowerCAmelCase , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=dtypes.floataa , __lowerCAmelCase=True , __lowerCAmelCase=None , ) -> Any:
lowercase__ , lowercase__ : str = random_seed.get_seed(__lowerCAmelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
lowercase__ : List[str] = dtypes.as_dtype(__lowerCAmelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype )
if fake_data:
lowercase__ : str = 10000
lowercase__ : Any = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
lowercase__ : Any = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
lowercase__ : Optional[int] = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
lowercase__ : List[Any] = images.astype(numpy.floataa )
lowercase__ : Any = numpy.multiply(__lowerCAmelCase , 1.0 / 2_5_5.0 )
lowercase__ : int = images
lowercase__ : int = labels
lowercase__ : Any = 0
lowercase__ : int = 0
@property
def _lowerCAmelCase( self ) -> str:
return self._images
@property
def _lowerCAmelCase( self ) -> Tuple:
return self._labels
@property
def _lowerCAmelCase( self ) -> Tuple:
return self._num_examples
@property
def _lowerCAmelCase( self ) -> Tuple:
return self._epochs_completed
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True ) -> List[str]:
if fake_data:
lowercase__ : Optional[int] = [1] * 784
lowercase__ : Tuple = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__lowerCAmelCase )],
[fake_label for _ in range(__lowerCAmelCase )],
)
lowercase__ : Optional[int] = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
lowercase__ : Any = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCAmelCase )
lowercase__ : Optional[Any] = self.images[perma]
lowercase__ : str = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
lowercase__ : Optional[int] = self._num_examples - start
lowercase__ : Any = self._images[start : self._num_examples]
lowercase__ : List[str] = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
lowercase__ : Tuple = numpy.arange(self._num_examples )
numpy.random.shuffle(__lowerCAmelCase )
lowercase__ : Optional[int] = self.images[perm]
lowercase__ : List[Any] = self.labels[perm]
# Start next epoch
lowercase__ : List[Any] = 0
lowercase__ : Optional[Any] = batch_size - rest_num_examples
lowercase__ : str = self._index_in_epoch
lowercase__ : List[str] = self._images[start:end]
lowercase__ : str = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
lowercase__ : Optional[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(UpperCAmelCase , '''Please write your own downloading logic.''' )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
if not gfile.Exists(UpperCAmelCase ):
gfile.MakeDirs(UpperCAmelCase )
lowercase__ : List[str] = os.path.join(UpperCAmelCase , UpperCAmelCase )
if not gfile.Exists(UpperCAmelCase ):
urllib.request.urlretrieve(UpperCAmelCase , UpperCAmelCase ) # noqa: S310
with gfile.GFile(UpperCAmelCase ) as f:
lowercase__ : Tuple = f.size()
print('''Successfully downloaded''' , UpperCAmelCase , UpperCAmelCase , '''bytes.''' )
return filepath
@deprecated(
UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __UpperCamelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=dtypes.floataa , UpperCAmelCase=True , UpperCAmelCase=5000 , UpperCAmelCase=None , UpperCAmelCase=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=UpperCAmelCase , one_hot=UpperCAmelCase , dtype=UpperCAmelCase , seed=UpperCAmelCase )
lowercase__ : Any = fake()
lowercase__ : Optional[int] = fake()
lowercase__ : Optional[int] = fake()
return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
if not source_url: # empty string check
lowercase__ : Tuple = DEFAULT_SOURCE_URL
lowercase__ : Tuple = '''train-images-idx3-ubyte.gz'''
lowercase__ : List[str] = '''train-labels-idx1-ubyte.gz'''
lowercase__ : Optional[int] = '''t10k-images-idx3-ubyte.gz'''
lowercase__ : str = '''t10k-labels-idx1-ubyte.gz'''
lowercase__ : Optional[Any] = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + train_images_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
lowercase__ : Optional[int] = _extract_images(UpperCAmelCase )
lowercase__ : Optional[Any] = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
lowercase__ : Union[str, Any] = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase )
lowercase__ : Any = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + test_images_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
lowercase__ : Tuple = _extract_images(UpperCAmelCase )
lowercase__ : Optional[int] = _maybe_download(
UpperCAmelCase , UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(UpperCAmelCase , '''rb''' ) as f:
lowercase__ : Tuple = _extract_labels(UpperCAmelCase , one_hot=UpperCAmelCase )
if not 0 <= validation_size <= len(UpperCAmelCase ):
lowercase__ : Optional[int] = (
'''Validation size should be between 0 and '''
F"""{len(UpperCAmelCase )}. Received: {validation_size}."""
)
raise ValueError(UpperCAmelCase )
lowercase__ : Optional[int] = train_images[:validation_size]
lowercase__ : List[str] = train_labels[:validation_size]
lowercase__ : Tuple = train_images[validation_size:]
lowercase__ : Optional[int] = train_labels[validation_size:]
lowercase__ : List[str] = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
lowercase__ : str = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
lowercase__ : int = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
lowercase__ : List[Any] = _DataSet(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
return _Datasets(train=UpperCAmelCase , validation=UpperCAmelCase , test=UpperCAmelCase )
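# Hedged usage sketch (illustrative addition). In the original TensorFlow tutorial
# code this file mirrors, the reader defined last above is read_data_sets and the
# batching method on each split is next_batch; those original names are assumed.
def _mnist_example(train_dir="/tmp/mnist-data"):
    data = read_data_sets(train_dir, one_hot=True, validation_size=5000)
    images, labels = data.train.next_batch(100)
    # With the default reshape=True, images is (100, 784) scaled to [0.0, 1.0];
    # labels is (100, 10) one-hot rows because one_hot=True was requested.
    return images.shape, labels.shape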
| 152
| 0
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
debug_launcher(test_script.main )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
debug_launcher(test_ops.main )
| 17
|
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(function: str, starting_point: complex, variable: str = "x", precision: float = 10**-10, multiplicity: int = 1) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 17
| 1
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class a ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case_ , snake_case_=7 , snake_case_=3 , snake_case_=30 , snake_case_=400 , snake_case_=True , snake_case_=None , snake_case_=True , snake_case_=[0.5, 0.5, 0.5] , snake_case_=[0.5, 0.5, 0.5] , snake_case_=True , snake_case_=1 / 255 , snake_case_=True , ):
'''simple docstring'''
__UpperCAmelCase: List[str] = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
__UpperCAmelCase: Tuple = parent
__UpperCAmelCase: List[str] = batch_size
__UpperCAmelCase: List[str] = num_channels
__UpperCAmelCase: Union[str, Any] = min_resolution
__UpperCAmelCase: Optional[int] = max_resolution
__UpperCAmelCase: Dict = do_resize
__UpperCAmelCase: Any = size
__UpperCAmelCase: Optional[int] = do_normalize
__UpperCAmelCase: Any = image_mean
__UpperCAmelCase: List[Any] = image_std
__UpperCAmelCase: Any = do_rescale
__UpperCAmelCase: int = rescale_factor
__UpperCAmelCase: Any = do_pad
def lowercase_ ( self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowercase_ ( self , snake_case_ , snake_case_=False ):
'''simple docstring'''
if not batched:
__UpperCAmelCase: Union[str, Any] = image_inputs[0]
if isinstance(snake_case_ , Image.Image ):
__UpperCAmelCase, __UpperCAmelCase: Dict = image.size
else:
__UpperCAmelCase, __UpperCAmelCase: List[Any] = image.shape[1], image.shape[2]
if w < h:
__UpperCAmelCase: str = int(self.size["""shortest_edge"""] * h / w )
__UpperCAmelCase: List[str] = self.size["""shortest_edge"""]
elif w > h:
__UpperCAmelCase: List[Any] = self.size["""shortest_edge"""]
__UpperCAmelCase: Any = int(self.size["""shortest_edge"""] * w / h )
else:
__UpperCAmelCase: Optional[Any] = self.size["""shortest_edge"""]
__UpperCAmelCase: Dict = self.size["""shortest_edge"""]
else:
__UpperCAmelCase: Union[str, Any] = []
for image in image_inputs:
__UpperCAmelCase, __UpperCAmelCase: Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__UpperCAmelCase: str = max(snake_case_ , key=lambda snake_case_ : item[0] )[0]
__UpperCAmelCase: Any = max(snake_case_ , key=lambda snake_case_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a ( __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = ConditionalDetrImageProcessor if is_vision_available() else None
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = ConditionalDetrImageProcessingTester(self )
@property
def lowercase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """image_mean""" ) )
self.assertTrue(hasattr(snake_case_ , """image_std""" ) )
self.assertTrue(hasattr(snake_case_ , """do_normalize""" ) )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , snake_case_ )
__UpperCAmelCase: str = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=snake_case_ )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
pass
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCAmelCase: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
__UpperCAmelCase: int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase, __UpperCAmelCase: str = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase, __UpperCAmelCase: Tuple = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
__UpperCAmelCase: Optional[int] = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCAmelCase: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
__UpperCAmelCase: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase, __UpperCAmelCase: List[Any] = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase: List[str] = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase, __UpperCAmelCase: Dict = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCAmelCase: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
__UpperCAmelCase: Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase, __UpperCAmelCase: List[Any] = self.image_processor_tester.get_expected_values(snake_case_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCAmelCase: Any = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
__UpperCAmelCase, __UpperCAmelCase: List[str] = self.image_processor_tester.get_expected_values(snake_case_ , batched=snake_case_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
__UpperCAmelCase: List[str] = json.loads(f.read() )
__UpperCAmelCase: Optional[Any] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
__UpperCAmelCase: Tuple = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
__UpperCAmelCase: int = image_processing(images=snake_case_ , annotations=snake_case_ , return_tensors="""pt""" )
# verify pixel values
__UpperCAmelCase: Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , snake_case_ )
__UpperCAmelCase: Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , snake_case_ , atol=1e-4 ) )
# verify area
__UpperCAmelCase: str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , snake_case_ ) )
# verify boxes
__UpperCAmelCase: int = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , snake_case_ )
__UpperCAmelCase: List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , snake_case_ , atol=1e-3 ) )
# verify image_id
__UpperCAmelCase: Dict = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , snake_case_ ) )
# verify is_crowd
__UpperCAmelCase: int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , snake_case_ ) )
# verify class_labels
__UpperCAmelCase: List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , snake_case_ ) )
# verify orig_size
__UpperCAmelCase: Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , snake_case_ ) )
# verify size
__UpperCAmelCase: Dict = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , snake_case_ ) )
@slow
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
__UpperCAmelCase: Any = json.loads(f.read() )
__UpperCAmelCase: str = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
__UpperCAmelCase: Union[str, Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
__UpperCAmelCase: int = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
__UpperCAmelCase: Dict = image_processing(images=snake_case_ , annotations=snake_case_ , masks_path=snake_case_ , return_tensors="""pt""" )
# verify pixel values
__UpperCAmelCase: List[Any] = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , snake_case_ )
__UpperCAmelCase: Optional[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , snake_case_ , atol=1e-4 ) )
# verify area
__UpperCAmelCase: List[Any] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , snake_case_ ) )
# verify boxes
__UpperCAmelCase: int = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , snake_case_ )
__UpperCAmelCase: Optional[Any] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , snake_case_ , atol=1e-3 ) )
# verify image_id
__UpperCAmelCase: Optional[int] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , snake_case_ ) )
# verify is_crowd
__UpperCAmelCase: Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , snake_case_ ) )
# verify class_labels
__UpperCAmelCase: Dict = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , snake_case_ ) )
# verify masks
__UpperCAmelCase: List[Any] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , snake_case_ )
# verify orig_size
__UpperCAmelCase: Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , snake_case_ ) )
# verify size
__UpperCAmelCase: int = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , snake_case_ ) )
| 523
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--src_path""" , type=str , default="""biencoder-nq-dev.json""" , help="""Path to raw DPR training data""" , )
    parser.add_argument(
        """--evaluation_set""" , type=str , help="""where to store parsed evaluation_set file""" , )
    parser.add_argument(
        """--gold_data_path""" , type=str , help="""where to store parsed gold_data_path file""" , )
    args = parser.parse_args()
    with open(args.src_path , """r""" ) as src_file, open(args.evaluation_set , """w""" ) as eval_file, open(
        args.gold_data_path , """w""" ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["""question"""]
            titles = [context["""title"""] for context in dpr_record["""positive_ctxs"""]]
            eval_file.write(question + """\n""" )
            gold_file.write("""\t""".join(titles) + """\n""" )
if __name__ == "__main__":
main()
| 523
| 1
|
from jiwer import compute_measures
import datasets
snake_case__ : List[Any] = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
snake_case__ : Tuple = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
snake_case__ : Any = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
] , )
def SCREAMING_SNAKE_CASE ( self , _snake_case=None , _snake_case=None , _snake_case=False ):
if concatenate_texts:
return compute_measures(_snake_case , _snake_case )["wer"]
else:
_UpperCAmelCase =0
_UpperCAmelCase =0
for prediction, reference in zip(_snake_case , _snake_case ):
_UpperCAmelCase =compute_measures(_snake_case , _snake_case )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
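# Worked example (illustrative addition): for the reference "this is the reference"
# and the prediction "this is an reference", jiwer counts S=1, D=0, I=0 against
# N=4 reference words, so WER = (S + D + I) / N = (1 + 0 + 0) / 4 = 0.25.
def _wer_worked_example():
    measures = compute_measures("this is the reference", "this is an reference")
    return (measures["substitutions"] + measures["deletions"] + measures["insertions"]) / (
        measures["substitutions"] + measures["deletions"] + measures["hits"]
    )  # -> 0.25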
| 592
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
snake_case__ : List[Any] = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 592
| 1
|
"""simple docstring"""
from math import pi, sqrt
def lowerCamelCase__ ( __snake_case ) -> float:
"""simple docstring"""
if num <= 0:
raise ValueError('''math domain error''' )
if num > 171.5:
raise OverflowError('''math range error''' )
elif num - int(__snake_case ) not in (0, 0.5):
raise NotImplementedError('''num must be an integer or a half-integer''' )
elif num == 0.5:
return sqrt(__snake_case )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def lowerCamelCase__ ( ) -> None:
"""simple docstring"""
assert gamma(0.5 ) == sqrt(__snake_case )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_a = 1.0
while num:
_a = float(input("""Gamma of: """))
print(F"""gamma({num}) = {gamma(num)}""")
print("""\nEnter 0 to exit...""")
| 19
|
"""simple docstring"""
from itertools import product
def __magic_name__ ( UpperCamelCase : int , UpperCamelCase : int ) -> list[int]:
a__ = sides_number
a__ = max_face_number * dice_number
a__ = [0] * (max_total + 1)
a__ = 1
a__ = range(UpperCamelCase , max_face_number + 1 )
for dice_numbers in product(UpperCamelCase , repeat=UpperCamelCase ):
a__ = sum(UpperCamelCase )
totals_frequencies[total] += 1
return totals_frequencies
def __magic_name__ ( ) -> float:
a__ = total_frequency_distribution(
sides_number=4 , dice_number=9 )
a__ = total_frequency_distribution(
sides_number=6 , dice_number=6 )
a__ = 0
a__ = 9
a__ = 4 * 9
a__ = 6
for peter_total in range(UpperCamelCase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
a__ = (4**9) * (6**6)
a__ = peter_wins_count / total_games_number
a__ = round(UpperCamelCase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F'''{solution() = }''')
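
# Added sanity check (illustrative, stdlib only): every possible roll is counted
# exactly once, so the frequencies for one dice configuration must sum to
# sides ** dice.
def _check_distribution() -> None:
    assert sum(total_frequency_distribution(sides_number=4, dice_number=2)) == 4**2
    assert sum(total_frequency_distribution(sides_number=6, dice_number=3)) == 6**3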
| 273
| 0
|
import torch

from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest


class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 705
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
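
# Added illustrative sketch (not part of transformers): conceptually, _LazyModule
# defers the heavy submodule imports above until an attribute is first accessed,
# similar to this hypothetical PEP 562-style resolver over _import_structure.
def _lazy_getattr_sketch(name):
    import importlib

    for submodule, exported_names in _import_structure.items():
        if name in exported_names:
            module = importlib.import_module("." + submodule, __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")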
| 61
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader (optionally over an iterable dataset) and prepare it."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl


def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
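
# Added usage note: this script is written to be launched on exactly two
# processes, e.g. `accelerate launch --num_processes 2 <this_file>.py`; the
# assertion in create_accelerator() enforces that expectation.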
| 6
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel.encode, holding the (pre-quantization) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
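
# Added minimal usage sketch (assumes a working diffusers install): push a random
# image through the autoencoder defined above and recover a same-shaped sample.
if __name__ == "__main__":
    vq = VQModel()
    image = torch.randn(1, 3, 32, 32)
    reconstruction = vq(image).sample
    print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])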
| 392
| 0
|
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__a = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
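
# Added usage sketch (illustrative; the data path and tokenizer checkpoint below
# are assumptions, not values shipped with this module).
def _usage_example():
    from transformers import AutoTokenizer

    data_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    # Builds (or loads from cache) the tokenized training features for MRPC.
    return GlueDataset(data_args, tokenizer=tokenizer, mode=Split.train)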
| 409
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
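
# Added illustrative demo (runnable if diffusers is installed; behavior inferred
# from the assertions above): a .bin weight file with no .safetensors counterpart
# makes the whole file list incompatible.
if __name__ == "__main__":
    print(is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]))  # False
    print(
        is_safetensors_compatible(
            ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
        )
    )  # True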
| 409
| 1
|
"""A tiny TCP client that receives a file from a local server."""
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
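
# Added companion sketch (an assumption, not part of the original file): a matching
# server for the client above would accept one connection, read the greeting, and
# stream a file back in 1024-byte chunks.
def _server_sketch(filename: str = "File_to_send", port: int = 12312) -> None:
    import socket as _socket

    server = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
    server.bind((_socket.gethostname(), port))
    server.listen(1)
    conn, _ = server.accept()
    print(conn.recv(1024))  # the client's greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()
    server.close()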
| 435
|
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor


class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_normalize=True,
        do_convert_rgb=True,
        patch_size=None,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_vqa(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch
                ).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )


@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11,
    reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.",
)
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(encoded_images.shape, (1, max_patch, expected_hidden_dim))

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch
            ).flattened_patches
            self.assertEqual(
                encoded_images.shape,
                (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim),
            )
| 1
|
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width after resizing with a shortest-edge size and size_divisor."""
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
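
# Added note: get_expected_values mirrors the processor's resize rule - scale the
# shorter side to `shortest_edge`, cap the longer side at 1333/800 * size, then
# round both sides down to a multiple of size_divisor.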
| 720
|
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # prune the walk: skip the scripts directory and hidden/underscore dirs
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
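
# Added illustrative check: md_prefix(0) starts a new "##" section heading, while
# deeper levels become indented markdown list bullets (two spaces per level).
assert md_prefix(0) == "\n##"
assert md_prefix(2) == "    *"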
| 236
| 0
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Find the index of the first negative number via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        # Each row's first negative can only move left, so shrink the search bound.
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three counting strategies against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
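    # Added cross-check (illustrative): all three strategies must agree on the
    # small sorted grids; e.g. the first grid contains exactly 8 negatives.
    for _g in ([[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]]):
        assert (
            count_negatives_binary_search(_g)
            == count_negatives_brute_force(_g)
            == count_negatives_brute_force_with_break(_g)
        )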
| 31
|
from sklearn.metrics import recall_score
import datasets
UpperCamelCase_ = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
UpperCamelCase_ = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. 
Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
UpperCamelCase_ = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
    def _compute( self , predictions , references , labels=None , pos_label=1 , average="binary" , sample_weight=None , zero_division="warn" , ):
        """simple docstring"""
        score = recall_score(
            references , predictions , labels=labels , pos_label=pos_label , average=average , sample_weight=sample_weight , zero_division=zero_division , )
        return {"recall": float(score ) if score.size == 1 else score}
| 611
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a_ : Any = logging.get_logger(__name__)
def get_dpt_config( checkpoint_url ):
    '''simple docstring'''
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [2_56, 5_12, 10_24, 10_24]
        expected_shape = (1, 3_84, 3_84)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 1_50
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type='dataset' ) ) , 'r' ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 1_50, 4_80, 4_80]
    return config, expected_shape
def remove_ignore_keys_( state_dict ):
    '''simple docstring'''
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( name ):
    '''simple docstring'''
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace('pretrained.model' , 'dpt.encoder' )
    if "pretrained.model" in name:
        name = name.replace('pretrained.model' , 'dpt.embeddings' )
    if "patch_embed" in name:
        name = name.replace('patch_embed' , 'patch_embeddings' )
    if "pos_embed" in name:
        name = name.replace('pos_embed' , 'position_embeddings' )
    if "attn.proj" in name:
        name = name.replace('attn.proj' , 'attention.output.dense' )
    if "proj" in name and "project" not in name:
        name = name.replace('proj' , 'projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layer' )
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1' , 'intermediate.dense' )
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2' , 'output.dense' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layernorm_before' )
    if "norm2" in name:
        name = name.replace('norm2' , 'layernorm_after' )
    if "scratch.output_conv" in name:
        name = name.replace('scratch.output_conv' , 'head' )
    if "scratch" in name:
        name = name.replace('scratch' , 'neck' )
    if "layer1_rn" in name:
        name = name.replace('layer1_rn' , 'convs.0' )
    if "layer2_rn" in name:
        name = name.replace('layer2_rn' , 'convs.1' )
    if "layer3_rn" in name:
        name = name.replace('layer3_rn' , 'convs.2' )
    if "layer4_rn" in name:
        name = name.replace('layer4_rn' , 'convs.3' )
    if "refinenet" in name:
        layer_idx = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
    if "out_conv" in name:
        name = name.replace('out_conv' , 'projection' )
    if "resConfUnit1" in name:
        name = name.replace('resConfUnit1' , 'residual_layer1' )
    if "resConfUnit2" in name:
        name = name.replace('resConfUnit2' , 'residual_layer2' )
    if "conv1" in name:
        name = name.replace('conv1' , 'convolution1' )
    if "conv2" in name:
        name = name.replace('conv2' , 'convolution2' )
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
    if "pretrained" in name:
        name = name.replace('pretrained' , 'dpt' )
    if "bn" in name:
        name = name.replace('bn' , 'batch_norm' )
    if "head" in name:
        name = name.replace('head' , 'head.head' )
    if "encoder.norm" in name:
        name = name.replace('encoder.norm' , 'layernorm' )
    if "auxlayer" in name:
        name = name.replace('auxlayer' , 'auxiliary_head.head' )
    return name
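# Quick check of the refinenet index mapping above (my annotation): abs(4 - 4) == 0 and
# abs(1 - 4) == 3, so "refinenet4" becomes "fusion_stage.layers.0" and "refinenet1"
# becomes "fusion_stage.layers.3", i.e. the fusion stage consumes the stages in reverse.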
def read_in_q_k_v( state_dict , config ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
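# Shape sanity check (illustrative, assuming the "large" config above): the fused
# qkv.weight is (3 * 1024, 1024), so each hidden_size-row slice taken above is a
# (1024, 1024) projection, and the three slices are query, key, value in that order.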
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    '''simple docstring'''
    config , expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location='cpu' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if 'ade' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 4_80 if 'ade' in checkpoint_url else 3_84
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='pt' )
    # forward pass
    outputs = model(**encoding ).logits if 'ade' in checkpoint_url else model(**encoding ).predicted_depth
    # Assert logits
    expected_slice = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
    assert outputs.shape == torch.Size(expected_shape )
    assert (
        torch.allclose(outputs[0, 0, :3, :3] , expected_slice , atol=1e-4 )
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3] , expected_slice )
    )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print('Pushing model to hub...' )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=True , )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 673
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 673
| 1
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( lowerCamelCase__ , unittest.TestCase ):
__UpperCAmelCase = MgpstrTokenizer
__UpperCAmelCase = False
__UpperCAmelCase = {}
__UpperCAmelCase = False
def _a ( self) -> Optional[Any]:
super().setUp()
# fmt: off
        vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ))))
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n')
def _a ( self , **lowercase_) -> Union[str, Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowercase_)
def _a ( self , lowercase_) -> Optional[int]:
__snake_case = 'tester'
__snake_case = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.')
def _a ( self) -> Optional[int]:
pass
def _a ( self) -> Dict:
__snake_case = self.get_tokenizers(do_lower_case=lowercase_)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__snake_case = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token})
__snake_case = tokenizer.encode([special_token] , add_special_tokens=lowercase_)
self.assertEqual(len(lowercase_) , 1)
__snake_case = tokenizer.decode(lowercase_ , skip_special_tokens=lowercase_)
self.assertTrue(special_token not in decoded)
def _a ( self) -> Union[str, Any]:
__snake_case = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__snake_case , __snake_case = self.get_input_output_texts(lowercase_)
__snake_case = tokenizer.tokenize(lowercase_)
__snake_case = tokenizer.convert_tokens_to_ids(lowercase_)
__snake_case = tokenizer.encode(lowercase_ , add_special_tokens=lowercase_)
self.assertListEqual(lowercase_ , lowercase_)
__snake_case = tokenizer.convert_ids_to_tokens(lowercase_)
self.assertNotEqual(len(lowercase_) , 0)
__snake_case = tokenizer.decode(lowercase_)
self.assertIsInstance(lowercase_ , lowercase_)
self.assertEqual(text_a.replace(' ' , '') , lowercase_)
@unittest.skip('MGP-STR tokenizer only handles one sequence.')
def _a ( self) -> Optional[int]:
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer')
def _a ( self) -> Optional[Any]:
pass
| 313
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = ['''image_processor''', '''tokenizer''']
__UpperCAmelCase = '''ViltImageProcessor'''
__UpperCAmelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_) -> List[Any]:
__snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
__snake_case = kwargs.pop('feature_extractor')
__snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowercase_ , lowercase_)
__snake_case = self.image_processor
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = True , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = None , **lowercase_ , ) -> BatchEncoding:
__snake_case = self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel_values + pixel_mask
__snake_case = self.image_processor(lowercase_ , return_tensors=lowercase_)
encoding.update(lowercase_)
return encoding
def _a ( self , *lowercase_ , **lowercase_) -> Optional[Any]:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def _a ( self , *lowercase_ , **lowercase_) -> Dict:
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def _a ( self) -> Tuple:
__snake_case = self.tokenizer.model_input_names
__snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
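    # Note (my annotation): dict.fromkeys keeps first-seen order while dropping
    # duplicates, e.g. list(dict.fromkeys(["input_ids", "pixel_values", "input_ids"]))
    # == ["input_ids", "pixel_values"], so shared names appear only once.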
@property
def _a ( self) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def _a ( self) -> List[str]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
| 313
| 1
|
"""simple docstring"""
def encrypt(input_string: str , key: int ) -> str:
    '''simple docstring'''
    temp_grid: list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = [''.join(row ) for row in temp_grid]
    output_string = ''.join(grid )
    return output_string
def decrypt(input_string: str , key: int ) -> str:
    '''simple docstring'''
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError('Height of grid can\'t be 0 or negative' )
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append('*' )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ''  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce(input_string: str ) -> dict[int, str]:
    '''simple docstring'''
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
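# Worked example (my sketch, not part of the original module): with key 4 the zigzag
# rows of "Hello World" are "HW", "e o", "lord" and "ll", so
#   encrypt("Hello World", 4) == "HWe olordll"
#   decrypt("HWe olordll", 4) == "Hello World"
#   bruteforce("HWe olordll")[4] == "Hello World"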
if __name__ == "__main__":
import doctest
doctest.testmod()
| 237
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : List[str] =logging.get_logger(__name__)
lowerCamelCase : str ={'''vocab_file''': '''sentencepiece.bpe.model'''}
lowerCamelCase : Tuple ={
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
}
}
lowerCamelCase : Optional[int] ={
'''camembert-base''': 5_12,
}
lowerCamelCase : Dict ='''▁'''
class __snake_case( A_ ):
'''simple docstring'''
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self , __lowerCamelCase , __lowerCamelCase="<s>" , __lowerCamelCase="</s>" , __lowerCamelCase="</s>" , __lowerCamelCase="<s>" , __lowerCamelCase="<unk>" , __lowerCamelCase="<pad>" , __lowerCamelCase="<mask>" , __lowerCamelCase=["<s>NOTUSED", "</s>NOTUSED"] , __lowerCamelCase = None , **__lowerCamelCase , ):
'''simple docstring'''
__A : Optional[Any] = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
__A : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , additional_special_tokens=__lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__lowerCamelCase , )
__A : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__lowerCamelCase ) )
__A : Union[str, Any] = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
__A : str = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
__A : str = len(self.fairseq_tokens_to_ids )
__A : List[str] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
__A : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _a ( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Optional[Any] = [self.cls_token_id]
__A : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
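    # Example (my annotation, with hypothetical ids cls_token_id=5 and sep_token_id=6):
    # for token_ids_a=[10, 11] and a second segment [12], the method above returns
    # [5, 10, 11, 6, 6, 12, 6] -- note the doubled </s></s> between the two segments,
    # the RoBERTa-style convention that CamemBERT follows.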
def _a ( self , __lowerCamelCase , __lowerCamelCase = None , __lowerCamelCase = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__lowerCamelCase )) + [1]
return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]
def _a ( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
__A : int = [self.sep_token_id]
__A : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def _a ( self ):
'''simple docstring'''
__A : int = {self.convert_ids_to_tokens(__lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , __lowerCamelCase ):
'''simple docstring'''
return self.sp_model.encode(__lowerCamelCase , out_type=__lowerCamelCase )
def _a ( self , __lowerCamelCase ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(__lowerCamelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(__lowerCamelCase )
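    # Offset example (my annotation): a sentencepiece piece with id 7 is exposed as
    # 7 + fairseq_offset == 11 here, because the 4 fairseq special tokens occupy ids 0-3.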
def _a ( self , __lowerCamelCase ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a ( self , __lowerCamelCase ):
'''simple docstring'''
__A : Tuple = []
__A : Optional[int] = ''
__A : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__lowerCamelCase ) + token
__A : Optional[Any] = True
__A : List[Any] = []
else:
current_sub_tokens.append(__lowerCamelCase )
__A : List[str] = False
out_string += self.sp_model.decode(__lowerCamelCase )
return out_string.strip()
    def __getstate__( self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__( self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
def _a ( self , __lowerCamelCase , __lowerCamelCase = None ):
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__A : str = os.path.join(
__lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__lowerCamelCase , 'wb' ) as fi:
__A : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(__lowerCamelCase )
return (out_vocab_file,)
| 237
| 1
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def lowerCAmelCase_ ( _lowercase : Any) -> List[str]:
"""simple docstring"""
return {key.lstrip("""-"""): value for key, value in zip(unknown_args[::2] , unknown_args[1::2])}
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
a__ : Optional[int] = ArgumentParser(
"""HuggingFace Datasets CLI tool""" , usage="""datasets-cli <command> [<args>]""" , allow_abbrev=a__)
a__ : Optional[int] = parser.add_subparsers(help="""datasets-cli command helpers""")
set_verbosity_info()
# Register commands
ConvertCommand.register_subcommand(a__)
EnvironmentCommand.register_subcommand(a__)
TestCommand.register_subcommand(a__)
RunBeamCommand.register_subcommand(a__)
DummyDataCommand.register_subcommand(a__)
# Parse args
a__ , a__ : List[Any] = parser.parse_known_args()
if not hasattr(a__ , """func"""):
parser.print_help()
exit(1)
a__ : List[str] = parse_unknown_args(a__)
# Run
a__ : Any = args.func(a__ , **a__)
service.run()
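# Usage sketch (mine; assumes the `datasets-cli` console entry point invokes main()):
#   datasets-cli env
#   datasets-cli test ./my_dataset --save_infos --all_configs
# Any extra `--key value` pairs left over by parse_known_args are folded into a kwargs
# dict by parse_unknown_args above and forwarded to the selected command.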
if __name__ == "__main__":
main()
| 136
|
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
A : Any = logging.get_logger(__name__)
class __A:
def __init__( self , _snake_case , _snake_case ) -> int:
'''simple docstring'''
__a = question_encoder
__a = generator
__a = self.question_encoder
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
if os.path.isfile(_snake_case ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(_snake_case , exist_ok=_snake_case )
__a = os.path.join(_snake_case , '''question_encoder_tokenizer''' )
__a = os.path.join(_snake_case , '''generator_tokenizer''' )
self.question_encoder.save_pretrained(_snake_case )
self.generator.save_pretrained(_snake_case )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls , _snake_case , **_snake_case ) -> List[Any]:
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
__a = kwargs.pop('''config''' , _snake_case )
if config is None:
__a = RagConfig.from_pretrained(_snake_case )
__a = AutoTokenizer.from_pretrained(
_snake_case , config=config.question_encoder , subfolder='''question_encoder_tokenizer''' )
__a = AutoTokenizer.from_pretrained(
_snake_case , config=config.generator , subfolder='''generator_tokenizer''' )
return cls(question_encoder=_snake_case , generator=_snake_case )
def __call__( self , *_snake_case , **_snake_case ) -> int:
'''simple docstring'''
return self.current_tokenizer(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> str:
'''simple docstring'''
return self.generator.batch_decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , *_snake_case , **_snake_case ) -> Any:
'''simple docstring'''
return self.generator.decode(*_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
__a = self.question_encoder
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
__a = self.generator
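    # Note (my annotation): these two setters flip `current_tokenizer` between the
    # question encoder and the generator, and __call__ above always routes through
    # whichever tokenizer is currently active.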
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = "longest" , _snake_case = None , _snake_case = True , **_snake_case , ) -> BatchEncoding:
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
'''regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '''
'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , _snake_case , )
if max_length is None:
__a = self.current_tokenizer.model_max_length
__a = self(
_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , max_length=_snake_case , padding=_snake_case , truncation=_snake_case , **_snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__a = self.current_tokenizer.model_max_length
__a = self(
text_target=_snake_case , add_special_tokens=_snake_case , return_tensors=_snake_case , padding=_snake_case , max_length=_snake_case , truncation=_snake_case , **_snake_case , )
__a = labels['''input_ids''']
return model_inputs
| 219
| 0
|
import string
def atbash_slow( sequence : str ):
    '''simple docstring'''
    output = """"""
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            output += chr(155 - extract )
        elif 97 <= extract <= 122:
            output += chr(219 - extract )
        else:
            output += i
    return output
def atbash( sequence : str ):
    '''simple docstring'''
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
def benchmark():
    '''simple docstring'''
    from timeit import timeit
    print("""Running performance benchmarks...""" )
    setup = """from string import printable ; from __main__ import atbash, atbash_slow"""
    print(f'''> atbash_slow(): {timeit('atbash_slow(printable)' ,setup=setup )} seconds''' )
    print(f'''> atbash(): {timeit('atbash(printable)' ,setup=setup )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 291
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt ,class_data_dir ,num_class_images ):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="""https://knn.laion.ai/knn-service""" ,indice_name="""laion_400m""" ,num_images=num_images ,aesthetic_weight=0.1 )
    os.makedirs(f'''{class_data_dir}/images''' ,exist_ok=True )
    if len(list(Path(f'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="""https://knn.laion.ai/knn-service""" ,indice_name="""laion_400m""" ,num_images=num_images ,aesthetic_weight=0.1 ,)
    count = 0
    total = 0
    pbar = tqdm(desc="""downloading real regularization images""" ,total=num_class_images )
    with open(f'''{class_data_dir}/caption.txt''' ,"""w""" ) as f_captions, open(f'''{class_data_dir}/urls.txt''' ,"""w""" ) as f_urls, open(
        f'''{class_data_dir}/images.txt''' ,"""w""" ) as f_images:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["""url"""] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(f'''{class_data_dir}/images/{total}.jpg''' ,"""wb""" ) as f:
                        f.write(img.content )
                    f_captions.write(images["""caption"""] + """\n""" )
                    f_urls.write(images["""url"""] + """\n""" )
                    f_images.write(f'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("""""" ,add_help=False )
    parser.add_argument("""--class_prompt""" ,help="""text prompt to retrieve images""" ,required=True ,type=str )
    parser.add_argument("""--class_data_dir""" ,help="""path to save images""" ,required=True ,type=str )
    parser.add_argument("""--num_class_images""" ,help="""number of images to download""" ,default=200 ,type=int )
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 291
| 1
|
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class __a ( _lowerCAmelCase ):
    prev_sample : torch.FloatTensor
    pred_original_sample : Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ) -> torch.Tensor:
    """simple docstring"""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
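# Minimal sanity check for the cosine schedule above (my sketch):
# bs = betas_for_alpha_bar(10)
# assert bs.shape == (10,) and bool((bs > 0).all()) and bool((bs <= 0.999).all())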
class __a ( _lowerCAmelCase , _lowerCAmelCase ):
@register_to_config
    def __init__( self , num_train_timesteps : int = 1_000 , variance_type : str = "fixed_small_log" , clip_sample : bool = True , clip_sample_range : Optional[float] = 1.0 , prediction_type : str = "epsilon" , beta_schedule : str = "squaredcos_cap_v2" , ):
        """simple docstring"""
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
def _SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Optional[int] = None )-> torch.FloatTensor:
"""simple docstring"""
return sample
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, torch.device] = None )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = num_inference_steps
UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCamelCase = (np.arange(0 , UpperCAmelCase_ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCamelCase = torch.from_numpy(UpperCAmelCase_ ).to(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[str]=None )-> Union[str, Any]:
"""simple docstring"""
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCamelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCamelCase = torch.log(torch.clamp(UpperCAmelCase_ , min=1e-20 ) )
UpperCamelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCamelCase = variance.log()
UpperCamelCase = beta.log()
UpperCamelCase = (predicted_variance + 1) / 2
UpperCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def _SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : int , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : bool = True , )-> Union[UnCLIPSchedulerOutput, Tuple]:
"""simple docstring"""
UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCamelCase , UpperCamelCase = torch.split(UpperCAmelCase_ , sample.shape[1] , dim=1 )
else:
UpperCamelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
UpperCamelCase = self.alphas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCamelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
" for the UnCLIPScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase = torch.clamp(
UpperCAmelCase_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
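        # Sanity note (mine): writing c0 for the x_0 coefficient and ct for the x_t
        # coefficient above, c0 + ct * alpha_prod_t ** 0.5 == alpha_prod_t_prev ** 0.5,
        # which is the usual DDPM posterior-mean identity behind formula (7).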
# 6. Add noise
UpperCamelCase = 0
if t > 0:
UpperCamelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCAmelCase_ , device=model_output.device )
UpperCamelCase = self._get_variance(
UpperCAmelCase_ , predicted_variance=UpperCAmelCase_ , prev_timestep=UpperCAmelCase_ , )
if self.variance_type == "fixed_small_log":
UpperCamelCase = variance
elif self.variance_type == "learned_range":
UpperCamelCase = (0.5 * variance).exp()
else:
raise ValueError(
f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
" for the UnCLIPScheduler." )
UpperCamelCase = variance * variance_noise
UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCAmelCase_ , pred_original_sample=UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : torch.IntTensor , )-> torch.FloatTensor:
"""simple docstring"""
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCamelCase = timesteps.to(original_samples.device )
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
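# This is the standard DDPM forward process (my annotation):
# x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise,
# i.e. q(x_t | x_0) = N(sqrt(alpha_bar_t) x_0, (1 - alpha_bar_t) I).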
| 554
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __a ( _lowerCAmelCase ):
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any )-> None:
"""simple docstring"""
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
| 554
| 1
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class UpperCamelCase( _a ):
    prev_sample : torch.FloatTensor
    derivative : torch.FloatTensor
    pred_original_sample : Optional[torch.FloatTensor] = None
class UpperCamelCase( _a , _a ):
    order = 2
@register_to_config
    def __init__( self , sigma_min : float = 0.02 , sigma_max : float = 1_0_0 , s_noise : float = 1.007 , s_churn : float = 8_0 , s_min : float = 0.05 , s_max : float = 5_0 , ):
        '''simple docstring'''
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)
def SCREAMING_SNAKE_CASE_ ( self : int , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : Optional[int] = None ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, torch.device] = None ) -> Any:
'''simple docstring'''
__snake_case = num_inference_steps
__snake_case = np.arange(0 , self.num_inference_steps )[::-1].copy()
__snake_case = torch.from_numpy(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
__snake_case = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
__snake_case = torch.tensor(SCREAMING_SNAKE_CASE , dtype=torch.floataa , device=SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
__snake_case = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
__snake_case = 0
# sample eps ~ N(0, S_noise^2 * I)
__snake_case = self.config.s_noise * randn_tensor(sample.shape , generator=SCREAMING_SNAKE_CASE ).to(sample.device )
__snake_case = sigma + gamma * sigma
__snake_case = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def SCREAMING_SNAKE_CASE_ ( self : Dict , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
__snake_case = sample_hat + sigma_hat * model_output
__snake_case = (sample_hat - pred_original_sample) / sigma_hat
__snake_case = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE , derivative=SCREAMING_SNAKE_CASE , pred_original_sample=SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : torch.FloatTensor , SCREAMING_SNAKE_CASE : bool = True , ) -> Union[KarrasVeOutput, Tuple]:
'''simple docstring'''
__snake_case = sample_prev + sigma_prev * model_output
__snake_case = (sample_prev - pred_original_sample) / sigma_prev
__snake_case = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=SCREAMING_SNAKE_CASE , derivative=SCREAMING_SNAKE_CASE , pred_original_sample=SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Dict:
'''simple docstring'''
raise NotImplementedError()
| 473
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCamelCase:
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any]=1_3 , SCREAMING_SNAKE_CASE : Optional[Any]=6_4 , SCREAMING_SNAKE_CASE : Union[str, Any]=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Any=3_2 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : List[Any]=4 , SCREAMING_SNAKE_CASE : Optional[int]=3_7 , SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE : Dict=0.1 , SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=1_0 , SCREAMING_SNAKE_CASE : int=0.02 , SCREAMING_SNAKE_CASE : Dict=[1, 1_6, 4, 4] , SCREAMING_SNAKE_CASE : List[str]=None , ) -> int:
'''simple docstring'''
__snake_case = parent
__snake_case = batch_size
__snake_case = image_size
__snake_case = patch_size
__snake_case = num_channels
__snake_case = is_training
__snake_case = use_labels
__snake_case = hidden_size
__snake_case = num_hidden_layers
__snake_case = num_attention_heads
__snake_case = intermediate_size
__snake_case = hidden_act
__snake_case = hidden_dropout_prob
__snake_case = attention_probs_dropout_prob
__snake_case = type_sequence_label_size
__snake_case = initializer_range
__snake_case = scope
__snake_case = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__snake_case = (self.image_size // 3_2) ** 2
__snake_case = num_patches + 1
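        # Illustrative arithmetic (my annotation): with the default image_size=64 above
        # this gives (64 // 32) ** 2 = 4 patches, so seq_length = 4 + 1 = 5 including
        # the [CLS] token.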
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
__snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case = None
if self.use_labels:
__snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 1_6, 3_2],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str ) -> int:
'''simple docstring'''
__snake_case = ViTHybridModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ) -> List[Any]:
'''simple docstring'''
__snake_case = self.type_sequence_label_size
__snake_case = ViTHybridForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
__snake_case = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
__snake_case = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case = config_and_inputs
__snake_case = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase( _a , _a , unittest.TestCase ):
snake_case_ : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
snake_case_ : str = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
snake_case_ : Tuple = False
snake_case_ : Optional[Any] = False
snake_case_ : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int:
'''simple docstring'''
__snake_case = ViTHybridModelTester(self )
__snake_case = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> int:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case = model_class(SCREAMING_SNAKE_CASE )
__snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case = [*signature.parameters.keys()]
__snake_case = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> Dict:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Optional[Any]:
'''simple docstring'''
__snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
__snake_case , __snake_case = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case = _config_zero_init(SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__snake_case = model_class(config=SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__snake_case = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ) -> Tuple:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case = ViTHybridModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( ) -> List[str]:
'''simple docstring'''
__snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
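
# --- Hedged usage sketch (not part of the test file above) -------------------
# A minimal stand-alone inference example mirroring what the integration test
# checks. The checkpoint name comes from the test itself; the "image.jpg"
# path is an illustrative assumption.
from PIL import Image
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor


def classify_image(path: str = "image.jpg") -> str:
    processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
    model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
    inputs = processor(images=Image.open(path), return_tensors="pt")
    logits = model(**inputs).logits
    # map the argmax class index back to a human-readable ImageNet label
    return model.config.id2label[logits.argmax(-1).item()]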
# ---------------------------------------------------------------------------
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=input_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
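
# --- Hedged usage sketch (not part of the test file above) -------------------
# The slow test above exercises greedy generation end to end. A minimal
# stand-alone version, assuming the "abeja/gpt-neox-japanese-2.7b" checkpoint
# can be downloaded:
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer


def generate_continuation(prompt: str, max_length: int = 50) -> str:
    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    generated = model.generate(input_ids, max_length=max_length)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)[0]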
# ---------------------------------------------------------------------------
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
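
# --- Hedged usage sketch (not part of the config module above) ---------------
# How the two classes combine in practice: build a config, then ask the ONNX
# config for its dynamic input axes. The layer/head sizes are illustrative
# assumptions, not defaults from any released checkpoint.
config = DistilBertConfig(n_layers=3, n_heads=6, dim=384, hidden_dim=4 * 384)
onnx_config = DistilBertOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes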
# ---------------------------------------------------------------------------
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_yolos import YolosFeatureExtractor
from .image_processing_yolos import YolosImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_yolos import (
YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
YolosForObjectDetection,
YolosModel,
YolosPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
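
# --- Hedged illustration (not part of the __init__ module above) -------------
# The `_LazyModule` pattern above defers heavy torch/vision imports until an
# attribute is actually accessed. A stripped-down sketch of the same deferred
# import idea, for illustration only:
import importlib


def lazy_getattr(module_name: str, attr: str):
    """Import `module_name` only when `attr` is actually requested."""
    return getattr(importlib.import_module(module_name), attr)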
# ---------------------------------------------------------------------------
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler,
            safety_checker=safety_checker, feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def components(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
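
# --- Hedged usage sketch (not part of the pipeline above) --------------------
# Driving the comparison pipeline via diffusers' community-pipeline loader.
# The custom_pipeline name "stable_diffusion_comparison" is an assumption.
from diffusers import DiffusionPipeline


def compare_checkpoints(prompt: str):
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
    )
    # one image per v1.x checkpoint, in order v1-1 .. v1-4
    return pipe(prompt=prompt, num_inference_steps=25).images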
# ---------------------------------------------------------------------------
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
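
# --- Hedged minimal example (not part of the test script above) --------------
# The core pattern the tests exercise: wrapping each step in
# `accelerator.accumulate(model)` so gradients sync only every N steps.
# Model, optimizer, dataloader, and loss_fn are placeholders supplied by you.
from accelerate import Accelerator


def train(model, optimizer, dataloader, loss_fn):
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for input, target in dataloader:
        with accelerator.accumulate(model):
            loss = loss_fn(model(input), target)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()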
# ---------------------------------------------------------------------------
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected Unweighted Graph for running Markov Chain Algorithm
    """

    def __init__(self) -> None:
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
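
# --- Hedged usage example (illustrative addition) -----------------------------
# A two-node chain whose per-node probabilities sum to 1; with enough steps the
# visit counts approximate the stationary distribution (about 5:1 for "a" vs "b").
if __name__ == "__main__":
    example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", example_transitions, 5000).most_common())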
# ---------------------------------------------------------------------------
import base64


def base64_encode(string: str) -> bytes:
    """Encode the given string to base64."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    """Decode the given base64-encoded bytes back to a string."""
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
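
# --- Hedged round-trip check (illustrative addition) --------------------------
# Encoding then decoding returns the original string, including non-ASCII text,
# since both directions go through UTF-8.
assert base64_decode(base64_encode("héllo, wörld")) == "héllo, wörld"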
# ---------------------------------------------------------------------------
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, and we should convert it to 0
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
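
# --- Hedged illustration (not part of conftest.py) ----------------------------
# What the custom checker enables: a doctest may opt out of output comparison
# with the IGNORE_RESULT flag, so only successful execution is verified.
# The function below is an illustrative example, not from the repository.
def roll_die() -> int:
    """
    >>> roll_die()  # doctest: +IGNORE_RESULT
    4
    """
    import random

    return random.randint(1, 6)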
# ---------------------------------------------------------------------------
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = (32, 32)
SCREAMING_SNAKE_CASE__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A_ )
return image
@property
    def dummy_cond_unet_upscale(self):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=A_ , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
    def dummy_vae(self):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder(self):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
return CLIPTextModel(A_ )
    def test_stable_diffusion_upscale(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE__ = DDPMScheduler()
SCREAMING_SNAKE_CASE__ = DDIMScheduler(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE__ = self.dummy_vae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
SCREAMING_SNAKE_CASE__ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , return_dict=A_ , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_upscale_batch(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE__ = DDPMScheduler()
SCREAMING_SNAKE_CASE__ = DDIMScheduler(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE__ = self.dummy_vae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
SCREAMING_SNAKE_CASE__ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE__ = torch.Generator(device=A_ ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
    def test_stable_diffusion_upscale_fp16(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE__ = DDPMScheduler()
SCREAMING_SNAKE_CASE__ = DDIMScheduler(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE__ = self.dummy_vae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
SCREAMING_SNAKE_CASE__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(A_ ) ).convert('''RGB''' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE__ = unet.half()
SCREAMING_SNAKE_CASE__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
SCREAMING_SNAKE_CASE__ = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_inference_steps=2 , output_type='''np''' , ).images
SCREAMING_SNAKE_CASE__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline.from_pretrained(A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
SCREAMING_SNAKE_CASE__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline.from_pretrained(
A_ , torch_dtype=torch.floataa , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
SCREAMING_SNAKE_CASE__ = '''stabilityai/stable-diffusion-x4-upscaler'''
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline.from_pretrained(
A_ , torch_dtype=torch.floataa , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = '''a cat sitting on a park bench'''
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=A_ , image=A_ , generator=A_ , num_inference_steps=5 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
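
# --- Hedged usage sketch (not part of the test file above) --------------------
# Minimal x4 upscaling with the public checkpoint exercised by the slow tests.
# The input path "low_res.png" is a placeholder assumption; a CUDA device is
# assumed for fp16 inference.
import torch
from PIL import Image
from diffusers import StableDiffusionUpscalePipeline


def upscale(path: str = "low_res.png", prompt: str = "a photo") -> Image.Image:
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    low_res = Image.open(path).convert("RGB")
    return pipe(prompt=prompt, image=low_res).images[0]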
# ---------------------------------------------------------------------------
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization."
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name"
    )
    parser.add_argument("--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
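
# --- Hedged CLI usage note (not part of the script above) ----------------------
# Illustrative invocation; the script filename and output path are assumptions:
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --output_file_path bart_beam_search.onnx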
# ---------------------------------------------------------------------------
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'EleutherAI/gpt-neo-1.3B': 'https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self, vocab_size=50257, max_position_embeddings=2048, hidden_size=2048, num_layers=24,
        attention_types=[[["global", "local"], 12]], num_heads=16, intermediate_size=None, window_size=256,
        activation_function="gelu_new", resid_dropout=0.0, embed_dropout=0.0, attention_dropout=0.0,
        classifier_dropout=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True,
        bos_token_id=50256, eos_token_id=50256, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
@staticmethod
    def expand_attention_types_params(attention_types):
        """Expand ``[[layer_types, num_repeat], ...]`` pairs into a flat per-layer list."""
        attentions = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
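
# A small self-check (an illustrative sketch, not part of the library): with
# the default `attention_types` of [[["global", "local"], 12]], each
# [types, repeat] pair contributes `repeat` copies of `types`, so the expanded
# per-layer list alternates "global"/"local" and has length 24 = `num_layers`.
def _demo_expand_attention_types():
    layer_types = GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
    assert len(layer_types) == 24
    assert layer_types[:4] == ["global", "local", "global", "local"]
    return layer_types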
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
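
# A quick equivalence check (a sketch): `custom_unfold` is meant to mirror
# torch.Tensor.unfold, so both calls should produce identical sliding windows.
def _demo_custom_unfold():
    import torch

    x = torch.arange(10).reshape(1, 10)
    expected = x.unfold(dimension=1, size=4, step=2)  # shape (1, 4, 4)
    actual = custom_unfold(x, dimension=1, size=4, step=2)
    assert torch.equal(actual, expected)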
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to enable the export to ONNX: returns the largest
    divisor of ``seq_length`` below ``window_size`` and the number of blocks."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
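
# Another small check (a sketch): for a sequence of length 512 and a window
# size of 256, the largest divisor of 512 strictly below 256 is 128, which
# yields 4 blocks.
def _demo_block_length():
    import torch

    block_length, num_blocks = custom_get_block_length_and_num_blocks(torch.tensor(512), 256)
    assert int(block_length) == 128 and int(num_blocks) == 4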
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
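
# A hedged usage sketch: wiring the ONNX config above into an export flow.
# With `use_past` disabled (the default task), only `input_ids` and
# `attention_mask` are exposed as graph inputs; the small layer counts here
# are illustrative.
def _demo_onnx_config():
    config = GPTNeoConfig(num_layers=4, attention_types=[[["global", "local"], 2]])
    onnx_config = GPTNeoOnnxConfig(config)
    assert list(onnx_config.inputs) == ["input_ids", "attention_mask"]
    assert onnx_config.default_onnx_opset == 13
    return onnx_config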
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyVaaControlnetPipeline
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )

        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_controlnet(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"

        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy"
        )

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = "A robot, 4k photo"

        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    output_lns = [x.strip() for x in open(pred_path).readlines()]
    reference_lns = [x.strip() for x in open(tgt_path).readlines()][: len(output_lns)]
    metrics = calculate_rouge(output_lns, reference_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
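
# A hedged usage sketch (file names are placeholders): each line of the two
# files is treated as one prediction/reference pair.
#   metrics = calculate_rouge_path("predictions.txt", "references.txt",
#                                  save_path="rouge_metrics.json")
#   print(metrics)  # e.g. {"rouge1": ..., "rouge2": ..., "rougeL": ...}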
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
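
# A brute-force cross-check (an illustrative sketch, exponential, tiny `days`
# only): enumerate all strings over {O, L, A} and count the ones with fewer
# than two absences and no run of three consecutive lates. For days=4 both
# approaches give 43.
def _brute_force_prize_strings(days: int) -> int:
    from itertools import product

    return sum(
        1
        for s in product("OLA", repeat=days)
        if s.count("A") < 2 and "LLL" not in "".join(s)
    )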
if __name__ == "__main__":
print(solution())
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
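
# Context for the one-liner above (assuming it implements Project Euler 120):
# for modulus a >= 3, the maximum of ((a - 1)^n + (a + 1)^n) mod a^2 over n is
# r_max = 2 * a * floor((a - 1) / 2); e.g. for a = 7 that gives 2 * 7 * 3 = 42.
# A brute-force check of the closed form:
def _r_max_brute_force(a: int, n_terms: int = 100) -> int:
    return max(((a - 1) ** n + (a + 1) ** n) % (a * a) for n in range(1, n_terms + 1))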
if __name__ == "__main__":
print(solution())
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
def UpperCAmelCase ( self :str , _lowercase :Tuple , _lowercase :Tuple , _lowercase :Tuple , _lowercase :Dict , _lowercase :Optional[Any] , _lowercase :int , _lowercase :List[Any] ):
'''simple docstring'''
lowercase__ = MegatronBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
lowercase__ = model(_lowercase , token_type_ids=_lowercase )
lowercase__ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase ( self :Any , _lowercase :Dict , _lowercase :Union[str, Any] , _lowercase :Optional[int] , _lowercase :Any , _lowercase :List[str] , _lowercase :Any , _lowercase :int ):
'''simple docstring'''
lowercase__ = MegatronBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self :Dict , _lowercase :str , _lowercase :Optional[Any] , _lowercase :Optional[Any] , _lowercase :List[Any] , _lowercase :Union[str, Any] , _lowercase :Optional[int] , _lowercase :List[str] ):
'''simple docstring'''
lowercase__ = MegatronBertForCausalLM(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self :Any , _lowercase :int , _lowercase :Tuple , _lowercase :Optional[int] , _lowercase :Dict , _lowercase :Dict , _lowercase :Optional[int] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = MegatronBertForNextSentencePrediction(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any] , _lowercase :str , _lowercase :List[Any] , _lowercase :List[Any] , _lowercase :Dict , _lowercase :List[str] ):
'''simple docstring'''
lowercase__ = MegatronBertForPreTraining(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , next_sentence_label=_lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase ( self :str , _lowercase :Optional[Any] , _lowercase :Tuple , _lowercase :int , _lowercase :List[Any] , _lowercase :List[Any] , _lowercase :Any , _lowercase :Any ):
'''simple docstring'''
lowercase__ = MegatronBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self :str , _lowercase :str , _lowercase :Any , _lowercase :Dict , _lowercase :Optional[Any] , _lowercase :int , _lowercase :int , _lowercase :Optional[Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = MegatronBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase ( self :List[Any] , _lowercase :List[str] , _lowercase :List[str] , _lowercase :List[Any] , _lowercase :List[Any] , _lowercase :List[Any] , _lowercase :List[Any] , _lowercase :Optional[Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = MegatronBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :List[str] , _lowercase :int , _lowercase :int , _lowercase :List[Any] , _lowercase :List[Any] , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = self.num_choices
lowercase__ = MegatronBertForMultipleChoice(config=_lowercase )
model.to(_lowercase )
model.eval()
lowercase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ = model(
_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase = True
# test_resize_embeddings = False
__lowerCamelCase = False
def UpperCAmelCase ( self :str , _lowercase :Tuple , _lowercase :str , _lowercase :int=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class in get_values(_lowercase ):
lowercase__ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
lowercase__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*_lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*_lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*_lowercase )
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*_lowercase )
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*_lowercase )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*_lowercase )
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*_lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*_lowercase )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
_snake_case = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip("Model is not available." )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
lowercase__ = os.path.join(os.environ["MYDIR"] , _lowercase )
lowercase__ = MegatronBertModel.from_pretrained(_lowercase )
model.to(_lowercase )
model.half()
lowercase__ = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
lowercase__ = model(_lowercase )[0]
lowercase__ = torch.Size((1, 9, 10_24) )
self.assertEqual(output.shape , _lowercase )
lowercase__ = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
lowercase__ = output[0, ii, jj]
lowercase__ = expected[3 * ii + jj]
lowercase__ = "ii={} jj={} a={} b={}".format(_lowercase , _lowercase , _lowercase , _lowercase )
self.assertTrue(math.isclose(_lowercase , _lowercase , rel_tol=_lowercase , abs_tol=_lowercase ) , msg=_lowercase )
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """
    Sort the left half and the right half individually,
    then merge them back into ``input_list[low : high + 1]``.
    """
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of the input list using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list
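
# A quick property check (an illustrative sketch): the iterative merge sort
# should agree with Python's built-in sorted() on random input.
def _check_iter_merge_sort() -> None:
    import random

    data = [random.randint(-100, 100) for _ in range(50)]
    assert iter_merge_sort(data) == sorted(data)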
if __name__ == "__main__":
UpperCamelCase = input('Enter numbers separated by a comma:\n').strip()
if user_input == "":
UpperCamelCase = []
else:
UpperCamelCase = [int(item.strip()) for item in user_input.split(',')]
print(iter_merge_sort(unsorted))
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the next number of the chain: the sum of the squares of the digits."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True
CHAINS[57] = False
def chain(number: int) -> bool:
    """Follow the chain of `number` until it arrives at 1 (True) or 89 (False),
    caching every starting point seen along the way."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count how many starting numbers below `number` arrive at 89 (cached as False)."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
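
# A worked example of the chain: 44 -> 32 -> 13 -> 10 -> 1 (arrives at 1),
# while 85 -> 89 (arrives at 89); solution() counts the latter kind.
def _demo_chain_steps() -> None:
    assert next_number(44) == 32 and next_number(32) == 13
    assert next_number(85) == 89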
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{solution() = }")
"""Image processor class for CLIP."""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True,
        crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self, image: np.ndarray, size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self, image: np.ndarray, size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs,
    ) -> np.ndarray:
        """Center crop an image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs) -> np.ndarray:
        """Rescale pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
        resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None,
        do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
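
# A hedged usage sketch (the input image is synthetic): with the defaults
# above, any image is resized so its short edge is 224, center-cropped to
# 224x224, rescaled to [0, 1], and normalized with the CLIP mean/std.
def _demo_clip_preprocess():
    from PIL import Image

    processor = CLIPImageProcessor()
    batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
    return batch["pixel_values"].shape  # expected: (1, 3, 224, 224)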
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False,
        router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64,
        encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001,
        second_expert_policy="all", normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs,
        )
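
# A minimal sketch: instantiating the config and overriding a few MoE routing
# knobs (the override values are illustrative, not recommendations).
def _demo_nllb_moe_config():
    config = NllbMoeConfig(num_experts=8, expert_capacity=32, router_dtype="bfloat16")
    assert config.num_experts == 8 and config.d_model == 1024
    return config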
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)
class SpeechTaFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]

    def __init__(
        self, feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0,
        do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64,
        win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80,
        fmax: float = 7600, mel_floor: float = 1e-10, reduction_factor: int = 2,
        return_attention_mask: bool = True, **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin,
            max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Every array in the list is normalized to have zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", )
        return log_mel_spec.T
def __call__( self , __A = None , __A = None , __A = False , __A = None , __A = False , __A = None , __A = None , __A = None , __A = None , **__A , ):
if audio is None and audio_target is None:
raise ValueError("""You must provide either `audio` or `audio_target` values.""" )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if audio is not None:
__a = self._process_audio(
__A , __A , __A , __A , __A , __A , __A , __A , **__A , )
else:
__a = None
if audio_target is not None:
__a = self._process_audio(
__A , __A , __A , __A , __A , __A , __A , __A , **__A , )
if inputs is None:
return inputs_target
else:
__a = inputs_target["""input_values"""]
__a = inputs_target.get("""attention_mask""" )
if decoder_attention_mask is not None:
__a = decoder_attention_mask
return inputs
def snake_case_ ( self , __A , __A = False , __A = False , __A = None , __A = False , __A = None , __A = None , __A = None , **__A , ):
__a = isinstance(__A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__a = is_batched_numpy or (
isinstance(__A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__a = [np.asarray(__A , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(__A , np.ndarray ):
__a = np.asarray(__A , dtype=np.floataa )
elif isinstance(__A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
__a = speech.astype(np.floataa )
# always return batch
if not is_batched:
__a = [speech]
# needed to make pad() work on spectrogram inputs
__a = self.feature_size
# convert into correct format for padding
if is_target:
__a = [self._extract_mel_features(__A ) for waveform in speech]
__a = BatchFeature({"""input_values""": features} )
__a = self.num_mel_bins
else:
__a = BatchFeature({"""input_values""": speech} )
__a = self.pad(
__A , padding=__A , max_length=__A , truncation=__A , pad_to_multiple_of=__A , return_attention_mask=__A , **__A , )
__a = feature_size_hack
# convert input values to correct format
__a = padded_inputs["""input_values"""]
if not isinstance(input_values[0] , np.ndarray ):
__a = [np.asarray(__A , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(__A , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
__a = [array.astype(np.floataa ) for array in input_values]
elif isinstance(__A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
__a = input_values.astype(np.floataa )
# convert attention_mask to correct format
__a = padded_inputs.get("""attention_mask""" )
if attention_mask is not None:
__a = [np.asarray(__A , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__a = (
attention_mask
if self._get_padding_strategies(__A , max_length=__A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__a = self.zero_mean_unit_var_norm(
padded_inputs["""input_values"""] , attention_mask=__A , padding_value=self.padding_value )
if return_tensors is not None:
__a = padded_inputs.convert_to_tensors(__A )
return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
for name in names:
if name in output:
del output[name]
return output
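
# A hedged usage sketch (assuming the class above mirrors transformers'
# SpeechT5FeatureExtractor; the waveform is synthetic):
#     import numpy as np
#     extractor = SpeechTaFeatureExtractor(do_normalize=True)
#     feats = extractor(audio=np.zeros(16000, dtype=np.float32), sampling_rate=16000)
#     feats["input_values"][0].shape  # one second of 16 kHz audio -> (16000,)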
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    This function checks the grid to see if each row,
    column, and the 3x3 subgrid contain the digit ``n``;
    it returns False if a duplicate is found, else True.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """
    This function finds an empty location so that we can assign a number
    for that particular row and column.
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """
    Takes a partially filled-in grid and attempts to assign values to all
    unassigned locations so that the Sudoku constraints hold (no duplicates
    across rows, columns, and 3x3 boxes), backtracking on dead ends.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    """
    A function to print the solution in the form of a 9x9 grid.
    """
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 20)
print_solution(example_grid)
print("""\nExample grid solution:""")
a_ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __A:
def __init__( self, A, A=2, A=True, A=False, A=10, A=3, A=32 * 8, A=32 * 8, A=4, A=64, ):
"""simple docstring"""
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = is_training
_UpperCamelCase = use_auxiliary_loss
_UpperCamelCase = num_queries
_UpperCamelCase = num_channels
_UpperCamelCase = min_size
_UpperCamelCase = max_size
_UpperCamelCase = num_labels
_UpperCamelCase = hidden_dim
_UpperCamelCase = hidden_dim
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()

        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
def _UpperCamelCase ( self, A, A ):
"""simple docstring"""
_UpperCamelCase = output.encoder_hidden_states
_UpperCamelCase = output.pixel_decoder_hidden_states
_UpperCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A ), config.decoder_layers )
def _UpperCamelCase ( self, A, A, A, A=False ):
"""simple docstring"""
with torch.no_grad():
_UpperCamelCase = MaskaFormerModel(config=A )
model.to(A )
model.eval()
_UpperCamelCase = model(pixel_values=A, pixel_mask=A )
_UpperCamelCase = model(A, output_hidden_states=A )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A, A )
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )
    def test_inference_universal_segmentation_head(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
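
# A hedged usage sketch (added for illustration, not part of the test suite):
# the raw `masks_queries_logits` / `class_queries_logits` checked above are
# normally combined into a segmentation map by the image processor, e.g.
#
#     processor = MaskaFormerImageProcessor.from_pretrained(
#         "facebook/mask2former-swin-small-coco-instance"
#     )
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     result = processor.post_process_instance_segmentation(
#         outputs, target_sizes=[image.size[::-1]]
#     )[0]
#     # result["segmentation"] is an (H, W) tensor of instance ids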
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)
    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
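
# A minimal sketch of direct `Trie` usage (illustration only, not a test):
#
#     trie = Trie()
#     trie.add("[CLS]")
#     trie.split("[CLS] hello")  # -> ["[CLS]", " hello"]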
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass


@input.register
class BulletMenu:
    """
    A CLI menu to select a choice from a list of choices using the keyboard.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        "Start the menu and return the result"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
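
# A hedged usage sketch (added for illustration; the surrounding helpers come
# from this package): in an interactive terminal,
#
#     choice = BulletMenu("Which mode?", ["no", "fp16", "bf16"]).run(default_choice=0)
#
# draws the list, moves the arrow with the up/down or number keys, and returns
# the index selected with enter.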
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
A : List[str] = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
A : List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
A : str = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
A : Optional[Any] = model.generate(SCREAMING_SNAKE_CASE , max_length=200 , do_sample=SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE )
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range

        self.use_cache = use_cache

        super().__init__(**kwargs)
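
# A short usage sketch (added for illustration): `attribute_map` lets the
# generic config names resolve to CTRL's own fields, so reading `hidden_size`
# returns `n_embd`.
#
#     config = CTRLConfig(n_layer=2)
#     assert config.hidden_size == config.n_embd == 1280
#     assert config.num_hidden_layers == config.n_layer == 2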
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("""Enter number of edges: """).strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
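
    # A small self-contained example (added for illustration, bypassing the
    # interactive input above): for the triangle 0-1 (w=1), 1-2 (w=2),
    # 0-2 (w=3), the minimum spanning tree is [(0, 1), (1, 2)].
    example = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        example[u].append([v, w])
        example[v].append([u, w])
    assert prisms_algorithm(example) == [(0, 1), (1, 2)]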
def find_min(arr):
    """
    Split an array of non-negative integers into two subsets so that the
    difference of the subset sums is as small as possible, and return that
    minimum difference.
    """
    n = len(arr)
    s = sum(arr)
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]

    for i in range(1, n + 1):
        dp[i][0] = True

    for i in range(1, s + 1):
        dp[0][i] = False

    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]

            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]

    for j in range(int(s / 2), -1, -1):
        if dp[n][j] is True:
            diff = s - 2 * j
            break

    return diff
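
if __name__ == "__main__":
    # A small worked example (added for illustration): splitting [1, 6, 11, 5]
    # into {1, 5, 6} and {11} gives sums 12 and 11, so the minimum difference is 1.
    print(find_min([1, 6, 11, 5]))  # expected: 1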
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """
    K-Means clustering using the TensorFlow 1.x session API.
    `vectors` should be an n x k array of n vectors of dimensionality k, and
    `noofclusters` the number of clusters to form.
    """

    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
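
if __name__ == "__main__":
    # A minimal, hedged usage sketch (added for illustration; like the function
    # above it assumes the TensorFlow 1.x session API): cluster six 2-D points
    # into two groups.
    points = [[1.0, 1.0], [1.1, 0.9], [0.9, 1.2], [8.0, 8.0], [8.2, 7.9], [7.9, 8.1]]
    centroids, assignments = TFKMeansCluster(points, 2)
    print(centroids)
    print(assignments)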
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """
    Compute the Levenshtein edit distance between two words with a memoized
    top-down recursion.
    """
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
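
    # A small usage example (added for illustration): turning "kitten" into
    # "sitting" takes three edits (two substitutions and one insertion).
    print(min_distance_up_bottom("kitten", "sitting"))  # expected: 3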
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )
        outputs = model(input_ids)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1))
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask)
@tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
@is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
@tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
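# --- Illustrative sketch (not from the original source) ---
# The two `check_use_cache_forward*` methods above verify one property: decoding
# the last token incrementally with a key/value cache must give (numerically
# almost) the same logits as a full forward pass over the whole sequence. The
# same check written framework-agnostically; `model_full` and `model_step` are
# hypothetical callables standing in for the Flax model with/without a cache.
def cache_consistency_check(model_full, model_step, input_ids, atol=1e-3):
    full_logits = model_full(input_ids)                   # logits for every position
    _, cache = model_step(input_ids[:, :-1], cache=None)  # warm the cache on the prefix
    step_logits, _ = model_step(input_ids[:, -1:], cache=cache)
    return np.max(np.abs(step_logits[:, -1] - full_logits[:, -1])) < atol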
| 324
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
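# --- Illustrative sketch (not from the original source) ---
# `align_with_features` swaps the generic `ClassLabel` placeholder in the label
# schema for the dataset's concrete one, so downstream code can see the actual
# class names. Minimal usage; the feature names below are made up.
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# template = TextClassification(text_column="text", label_column="labels")
# aligned = template.align_with_features(features)
# assert aligned.label_schema["labels"].names == ["neg", "pos"]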
| 324
| 1
|
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).")
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
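# --- Illustrative note (not from the original source) ---
# string_to_bool accepts the usual truthy/falsy spellings case-insensitively:
#     string_to_bool("YES") -> True, string_to_bool("0") -> False
# and raises ArgumentTypeError for anything else, which argparse reports as a
# clean usage error instead of a stack trace.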
def HfArg(*, aliases=None, help=None, default=dataclasses.MISSING, default_factory=dataclasses.MISSING, metadata=None, **kwargs):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default")
        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]
        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'.")
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]
            kwargs["type"] = make_choice_type_function(kwargs["choices"])
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)
            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
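    # --- Illustrative note (not from the original source) ---
    # Given a dataclass field `debug: bool = True`, the code above registers both
    # `--debug` (nargs="?", const=True, default=True) and a complementary
    # `--no_debug` (action="store_false", dest="debug"), so `--no_debug` is the
    # way to turn a default-True flag off from the command line.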
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)")
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`.") from ex
            raise
        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None, args_file_flag=None, ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)
                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
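# --- Illustrative sketch (not from the original source) ---
# Minimal end-to-end usage of the parser above; the dataclass and argv values
# are made up for illustration.
# @dataclasses.dataclass
# class TrainArgs:
#     learning_rate: float = 3e-4
#     epochs: int = 3
#     debug: bool = True
#
# parser = HfArgumentParser(TrainArgs)
# (train_args,) = parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-3", "--no_debug"])
# assert train_args.learning_rate == 1e-3 and train_args.debug is False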
| 700
|
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(self, backbone_config: Optional[Dict] = None, feature_size: int = 256, mask_feature_size: int = 256, hidden_dim: int = 256, encoder_feedforward_dim: int = 1024, activation_function: str = "relu", encoder_layers: int = 6, decoder_layers: int = 10, num_attention_heads: int = 8, dropout: float = 0.0, dim_feedforward: int = 2048, pre_norm: bool = False, enforce_input_projection: bool = False, common_stride: int = 4, ignore_value: int = 255, num_queries: int = 100, no_object_weight: float = 0.1, class_weight: float = 2.0, mask_weight: float = 5.0, dice_weight: float = 5.0, train_num_points: int = 12_544, oversample_ratio: float = 3.0, importance_sample_ratio: float = 0.75, init_std: float = 0.02, init_xavier_std: float = 1.0, use_auxiliary_loss: bool = True, feature_strides: List[int] = [4, 8, 16, 32], output_auxiliary_logits: bool = None, **kwargs, ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224, in_channels=3, patch_size=4, embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7, drop_path_rate=0.3, use_absolute_embeddings=False, out_features=["stage1", "stage2", "stage3", "stage4"], )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}")
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config, **kwargs, )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
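# --- Illustrative sketch (not from the original source) ---
# Building the config with a backbone passed as a plain dict; the values below
# are made up for illustration.
# swin_cfg = {"model_type": "swin", "depths": [2, 2, 6, 2], "num_heads": [3, 6, 12, 24]}
# config = Mask2FormerConfig(backbone_config=swin_cfg, num_queries=200)
# assert config.backbone_config.model_type == "swin"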
| 465
| 0
|
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X20000 and cp <= 0X2a6df) #
or (cp >= 0X2a700 and cp <= 0X2b73f) #
or (cp >= 0X2b740 and cp <= 0X2b81f) #
or (cp >= 0X2b820 and cp <= 0X2ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2f800 and cp <= 0X2fa1f) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
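# --- Illustrative example (not from the original source) ---
# Given BERT tokens ["我", "喜", "欢", "你"] and the LTP word set {"喜欢"},
# add_sub_symbol marks the non-initial characters of each whole word:
#     ["我", "喜", "##欢", "你"]
# so that whole-word masking can later mask "喜" and "##欢" together.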
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
    args = parser.parse_args()
main(args)
| 9
|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
| 682
| 0
|
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    """Apply an X (NOT) gate to two qubits and measure them on the simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(F'''Total count for various states are: {counts}''')
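# --- Illustrative note (not from the original source) ---
# With an X gate applied to both qubits the measurement is deterministic, so
# all 1000 shots should land on the |11> state, e.g.:
#     Total count for various states are: {'11': 1000}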
| 364
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
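# --- Illustrative invocation (not from the original source); script name and
# paths below are made up ---
# python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./bigbird/model.ckpt \
#     --big_bird_config_file ./bigbird/config.json \
#     --pytorch_dump_path ./bigbird-pytorch \
#     --is_trivia_qa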
| 364
| 1
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 378
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Warn on every call that the wrapped function is experimental."""
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
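# --- Illustrative usage (not from the original source) ---
# The decorator wraps a function so that every call emits a warning first;
# the function name below is made up.
# @experimental
# def new_cache_layout(path):
#     ...
#
# new_cache_layout("/tmp")  # warns: 'new_cache_layout' is experimental ...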
| 378
| 1
|
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
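# --- Illustrative usage (not from the original source); the checkpoint name
# and audio file are examples, not guarantees ---
# from transformers import pipeline
#
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# # -> [{"score": 0.99, "label": "Sound of a dog"}, ...]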
| 721
|
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration

REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        _ = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path)
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        _ = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path)
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf") as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model which is supposed to download a model checkpoint
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        # mock load_from_checkpoint which is supposed to load the downloaded model
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def __lowerCAmelCase( ):
"""simple docstring"""
_lowercase : Tuple = load_metric(os.path.join('metrics' ,'seqeval' ) )
_lowercase : int = 'ERROR'
_lowercase : Union[str, Any] = F'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(__UpperCAmelCase ,match=re.escape(__UpperCAmelCase ) ):
metric.compute(predictions=[] ,references=[] ,scheme=__UpperCAmelCase )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv3': [
        'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'LayoutLMv3Config',
        'LayoutLMv3OnnxConfig',
    ],
    'processing_layoutlmv3': ['LayoutLMv3Processor'],
    'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_layoutlmv3_fast'] = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_layoutlmv3'] = [
        'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv3ForQuestionAnswering',
        'LayoutLMv3ForSequenceClassification',
        'LayoutLMv3ForTokenClassification',
        'LayoutLMv3Model',
        'LayoutLMv3PreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_layoutlmv3'] = [
        'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFLayoutLMv3ForQuestionAnswering',
        'TFLayoutLMv3ForSequenceClassification',
        'TFLayoutLMv3ForTokenClassification',
        'TFLayoutLMv3Model',
        'TFLayoutLMv3PreTrainedModel',
    ]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_layoutlmv3'] = ['LayoutLMv3FeatureExtractor']
    _import_structure['image_processing_layoutlmv3'] = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f'Open Knight Tour cannot be performed on a board of size {n}'
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
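    # Illustrative run (our addition, not in the original module): a 5x5 board is
    # the smallest square board that admits an open knight's tour; each cell of
    # the returned board holds that square's move number (1..25).
    for row in open_knight_tour(5):
        print(row)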
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation='relu'):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride,
            padding=kernel_size // 2, groups=groups, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input):
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels, reduced_channels):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid())

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0]))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.',
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None,
                return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values: Optional[Tensor] = None, labels: Optional[Tensor] = None,
                output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == 'regression':
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == 'single_label_classification':
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == 'multi_label_classification':
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims)

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
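# Minimal usage sketch (illustrative only; the checkpoint id and tokenizer choice
# are assumptions, not part of this module):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained('M-CLIP/XLM-Roberta-Large-Vit-B-32')
#   model = MultilingualCLIP.from_pretrained('M-CLIP/XLM-Roberta-Large-Vit-B-32')
#   batch = tokenizer(['a photo of a cat'], return_tensors='pt', padding=True)
#   txt_features, _ = model(batch['input_ids'], batch['attention_mask'])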
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = f'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
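    # Quick sanity check (our addition): the Catalan sequence starts 1, 1, 2, 5, 14, ...
    print(catalan(5))  # -> 14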
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01,
                    1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03,
                ]
            )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image
    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision,
        )
        model.to(torch_device).eval()
        return model
    def get_generator(self, seed=0):
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
"""simple docstring"""
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index],
        )
__UpperCAmelCase = [3, 34, 4, 12, 5, 2]
__UpperCAmelCase = 9
__UpperCAmelCase = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
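# For nums = [3, 34, 4, 12, 5, 2] and max_sum = 9, the only subsets that sum
# to 9 are [3, 4, 2] and [4, 5], so that is exactly what gets printed above.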
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
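    # Numerical sanity check (our addition): gamma(n) == (n - 1)! for positive
    # integers, so gamma(5.0) should evaluate to ~24.0 (up to quadrature error).
    print(gamma(5.0))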
def manhattan_distance(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        'Expected a list of numbers as input, found '
                        f'{type(item).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError('Missing an input')


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError('Both points must be in the same n-dimensional space')
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
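    # Both implementations agree; e.g. |1-4| + |1-5| = 7:
    print(manhattan_distance([1, 1], [4, 5]))            # -> 7.0
    print(manhattan_distance_one_liner([1, 1], [4, 5]))  # -> 7.0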
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
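    # Non-interactive example of the same API (our addition):
    #   cipher_map = create_cipher_map('Goodbye!!')
    #   encipher('Hello World!!', cipher_map)  # -> 'CYJJM VMQJB!!'
    #   decipher('CYJJM VMQJB!!', cipher_map)  # -> 'HELLO WORLD!!'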
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n]"""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel('Gain (dB)')
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel('Frequency (Hz)')
    plt.xscale('log')
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel('Phase shift (Radians)')
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
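# Usage sketch (our addition): any object with a ``process(sample) -> float``
# method satisfies the FilterType protocol. The identity filter below is a
# hypothetical stand-in for a real IIR filter; its impulse response is a unit
# impulse, so it plots a flat 0 dB magnitude response and zero phase shift.
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


if __name__ == "__main__":
    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)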
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
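    # Illustrative examples (our addition):
    print(snake_to_camel_case("some_random_string"))                   # -> 'someRandomString'
    print(snake_to_camel_case("some_random_string", use_pascal=True))  # -> 'SomeRandomString'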
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3):
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be a integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')
    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
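    # For the all-zeros input register, the QFT produces an equal superposition,
    # so the 10000 shots should land roughly uniformly on all 2**3 = 8 basis
    # states (about 1250 counts each, up to sampling noise).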
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = 'ml.p3.2xlarge'
    iam_role_name = 'accelerate_sagemaker_execution_role'
    profile = 'hf-sm'
    region = 'us-east-1'
    num_machines = 1
    base_job_name = 'accelerate-sagemaker-1'
    pytorch_version = '1.6'
    transformers_version = '4.4'
    training_script = 'train.py'
    success_training_script_args = [
        '--model_name_or_path',
        'bert',
        '--do_train',
        'False',
        '--epochs',
        '3',
        '--learning_rate',
        '5e-5',
        '--max_steps',
        '50.5',
    ]
    fail_training_script_args = [
        '--model_name_or_path',
        'bert',
        '--do_train',
        '--do_test',
        'False',
        '--do_predict',
        '--epochs',
        '3',
        '--learning_rate',
        '5e-5',
        '--max_steps',
        '50.5',
    ]


class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args['model_name_or_path'], str)
        assert isinstance(converted_args['do_train'], bool)
        assert isinstance(converted_args['epochs'], int)
        assert isinstance(converted_args['learning_rate'], float)
        assert isinstance(converted_args['max_steps'], float)
        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_deit'] = ['DeiTFeatureExtractor']
    _import_structure['image_processing_deit'] = ['DeiTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_deit'] = [
        'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DeiTForImageClassification',
        'DeiTForImageClassificationWithTeacher',
        'DeiTForMaskedImageModeling',
        'DeiTModel',
        'DeiTPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_deit'] = [
        'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDeiTForImageClassification',
        'TFDeiTForImageClassificationWithTeacher',
        'TFDeiTForMaskedImageModeling',
        'TFDeiTModel',
        'TFDeiTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_blenderbot_fast'] = ['BlenderbotTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blenderbot'] = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_blenderbot'] = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_blenderbot'] = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )
    if weight_type == 'weight':
        hf_pointer.weight.data = value
    elif weight_type == 'weight_g':
        hf_pointer.weight_g.data = value
    elif weight_type == 'weight_v':
        hf_pointer.weight_v.data = value
    elif weight_type == 'bias':
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if 'conv_layers' in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
                if key in name or (key.split('w2v_model.')[-1] == name.split('.')[0] and not is_finetuned):
                    is_used = True
                    if '*' in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if 'weight_g' in name:
                        weight_type = 'weight_g'
                    elif 'weight_v' in name:
                        weight_type = 'weight_v'
                    elif 'weight' in name:
                        weight_type = 'weight'
                    elif 'bias' in name:
                        weight_type = 'bias'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if 'bias' in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif 'weight' in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if 'bias' in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.')
        elif 'weight' in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word, eos_token=target_dict.eos_word,
                word_delimiter_token='|', do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0,
                do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])})
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_A : str = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
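
# --- Hedged usage sketch (not part of the original script; the paths below
# are assumed placeholders, substitute your own fairseq checkpoint and
# output directory) ---
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path /path/to/hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned
#
# Omit --not_finetuned (and pass --dict_path) for a CTC fine-tuned checkpoint.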
| 330
|
'''simple docstring'''
def search(list_data: list , key: int , left: int = 0 , right: int = 0 ) -> int:
    '''simple docstring'''
    right = right or len(list_data ) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
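
# Minimal usage sketch (example data assumed, not from the original file).
# The function scans inward from both ends, so keys near either end are
# found quickly; a missing key returns -1:
#
#   search([1, 2, 3, 4, 5], 2)   # -> 1
#   search([1, 2, 3, 4, 5], 10)  # -> -1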
| 330
| 1
|
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'BridgeTowerImageProcessor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__( self , image_processor , tokenizer) -> None:
        super().__init__(image_processor , tokenizer)
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode( self , *args , **kwargs) -> List[str]:
        return self.tokenizer.batch_decode(*args , **kwargs)
    def decode( self , *args , **kwargs) -> str:
        return self.tokenizer.decode(*args , **kwargs)
    @property
    def model_input_names( self) -> List[str]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
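
# Hedged usage sketch (the checkpoint name and image path are assumed
# examples, not part of the original file):
#
#   from transformers import BridgeTowerProcessor
#   from PIL import Image
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of a cat", return_tensors="pt")
#   # `inputs` holds input_ids/attention_mask from the tokenizer plus
#   # pixel_values/pixel_mask from the image processor.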
| 313
|
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self):
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids , attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,) , half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device)] , dim=1 , )

        # get two different outputs
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask)['last_hidden_state']
        output_from_past = model(next_tokens , past_key_values=past_key_values , attention_mask=attn_mask)['last_hidden_state']

        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device)

        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1)

        output_from_no_past = model(next_input_ids , attention_mask=attention_mask)['last_hidden_state']
        output_from_past = model(next_tokens , attention_mask=attention_mask , past_key_values=past_key_values)[
            'last_hidden_state'
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
    def create_and_check_forward_and_backwards( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args , gradient_checkpointing=False):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids , labels=input_ids)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization( self , config , *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01)
    def create_and_check_biogpt_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common( self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    def setUp( self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37)
    def test_config( self):
        self.config_tester.run_common_tests()
    def test_biogpt_model( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_various_embeddings( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_biogpt_model_att_mask_past( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)
    def test_biogpt_gradient_checkpointing( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True)
    def test_biogpt_model_past_with_large_inputs( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)
    def test_biogpt_weight_initialization( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)
    def test_biogpt_token_classification( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
    def test_batch_generation( self):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')

        tokenizer.padding_side = 'left'

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            'Hello, my dog is a little',
            'Today, I',
        ]

        inputs = tokenizer(sentences , return_tensors='pt' , padding=True)
        input_ids = inputs['input_ids'].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids , attention_mask=inputs['attention_mask'].to(torch_device) , )

        inputs_non_padded = tokenizer(sentences[0] , return_tensors='pt').input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] , return_tensors='pt').input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True)

        expected_output_sentence = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence)
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence])
@slow
    def test_model_from_pretrained( self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_biogpt_sequence_classification_model( self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_biogpt_sequence_classification_model_for_multi_label( self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class BioGptModelIntegrationTest ( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_biogpt( self):
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape , expected_shape)
        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4))
@slow
    def test_biogpt_generation( self):
        tokenizer = BioGptTokenizer.from_pretrained('microsoft/biogpt')
        model = BioGptForCausalLM.from_pretrained('microsoft/biogpt')
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer('COVID-19 is' , return_tensors='pt').to(torch_device)
        output_ids = model.generate(
            **tokenized , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=True , )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
            ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
            ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
            ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
            ' more than 800,000 deaths.'
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR)
| 128
| 0
|
import csv
import tweepy
# Twitter API credentials
UpperCAmelCase_ = """"""
UpperCAmelCase_ = """"""
UpperCAmelCase_ = """"""
UpperCAmelCase_ = """"""
def __magic_name__ ( lowercase ) -> None:
"""simple docstring"""
lowercase_ : Optional[Any] = tweepy.OAuthHandler(lowercase , lowercase )
auth.set_access_token(lowercase , lowercase )
lowercase_ : Dict = tweepy.API(lowercase )
# initialize a list to hold all the tweepy Tweets
lowercase_ : int = []
# make initial request for most recent tweets (200 is the maximum allowed count)
lowercase_ : str = api.user_timeline(screen_name=lowercase , count=200 )
# save most recent tweets
alltweets.extend(lowercase )
# save the id of the oldest tweet less one
lowercase_ : Union[str, Any] = alltweets[-1].id - 1
# keep grabbing tweets until there are no tweets left to grab
while len(lowercase ) > 0:
print(f"""getting tweets before {oldest}""" )
# all subsequent requests use the max_id param to prevent duplicates
lowercase_ : List[Any] = api.user_timeline(
screen_name=lowercase , count=200 , max_id=lowercase )
# save most recent tweets
alltweets.extend(lowercase )
# update the id of the oldest tweet less one
lowercase_ : Union[str, Any] = alltweets[-1].id - 1
print(f"""...{len(lowercase )} tweets downloaded so far""" )
# transform the tweepy tweets into a 2D array that will populate the csv
lowercase_ : Optional[Any] = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
# write the csv
with open(f"""new_{screen_name}_tweets.csv""" , """w""" ) as f:
lowercase_ : Optional[Any] = csv.writer(lowercase )
writer.writerow(["""id""", """created_at""", """text"""] )
writer.writerows(lowercase )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 436
|
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def __magic_name__ ( lowercase="" ) -> str:
"""simple docstring"""
lowercase_ : Dict = tempfile.mkdtemp()
return os.path.join(lowercase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class AgentAudioTests ( unittest.TestCase ):
'''simple docstring'''
    def test_from_tensor( self ) -> None:
        """simple docstring"""
        tensor = torch.rand(12, dtype=torch.float32 ) - 0.5
        agent_type = AgentAudio(tensor )
        path = str(agent_type.to_string() )

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4 ) )

        del agent_type

        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path ) )

        # Ensure that the file contains the same value as the original tensor
        new_tensor, _ = sf.read(path )
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor ), atol=1E-4 ) )
    def test_from_string( self ) -> None:
        """simple docstring"""
        tensor = torch.rand(12, dtype=torch.float32 ) - 0.5
        path = get_new_path(suffix=""".wav""" )
        sf.write(path, tensor, 1_60_00 )

        agent_type = AgentAudio(path )
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1E-4 ) )
        self.assertEqual(agent_type.to_string(), path )
@require_vision
@require_torch
class AgentImageTests ( unittest.TestCase ):
'''simple docstring'''
    def test_from_tensor( self ) -> None:
        """simple docstring"""
        tensor = torch.randint(0, 2_56, (64, 64, 3) )
        agent_type = AgentImage(tensor )
        path = str(agent_type.to_string() )

        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1E-4 ) )

        self.assertIsInstance(agent_type.to_raw(), Image.Image )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_string( self ) -> None:
        """simple docstring"""
        path = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        image = Image.open(path )
        agent_type = AgentImage(path )

        self.assertTrue(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
    def test_from_image( self ) -> None:
        """simple docstring"""
        path = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
        image = Image.open(path )
        agent_type = AgentImage(image )

        self.assertFalse(path.samefile(agent_type.to_string() ) )
        self.assertTrue(image == agent_type.to_raw() )

        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path ) )
class AgentTextTests ( unittest.TestCase ):
'''simple docstring'''
    def test_from_string( self ) -> None:
        """simple docstring"""
        string = """Hey!"""
        agent_type = AgentText(string )

        self.assertEqual(string, agent_type.to_string() )
        self.assertEqual(string, agent_type.to_raw() )
        self.assertEqual(string, agent_type )
| 436
| 1
|
def UpperCAmelCase ( a_ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
__A = set()
# Replace all the whitespace in our sentence
__A = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(a_ ) == 2_6
def UpperCAmelCase ( a_ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
__A = [False] * 2_6
for char in input_str:
if char.islower():
__A = True
elif char.isupper():
__A = True
return all(a_ )
def UpperCAmelCase ( a_ = "The quick brown fox jumps over the lazy dog" , ) -> bool:
"""simple docstring"""
return len({char for char in input_str.lower() if char.isalpha()} ) == 2_6
def UpperCAmelCase ( ) -> None:
"""simple docstring"""
from timeit import timeit
__A = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=a_ ) )
print(timeit("is_pangram_faster()" , setup=a_ ) )
print(timeit("is_pangram_fastest()" , setup=a_ ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
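
# Quick usage sketch (example strings assumed, not from the original file):
#
#   is_pangram()                # -> True  (the default sentence is a pangram)
#   is_pangram("hello world")   # -> False
#   is_pangram_fastest("Pack my box with five dozen liquor jugs")  # -> True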
| 55
|
"""simple docstring"""
def equated_monthly_installments(principal: float , rate_per_annum: float , years_to_repay: int ) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0" )
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0" )
    if years_to_repay <= 0 or not isinstance(years_to_repay , int ):
        raise Exception("Years to repay must be an integer > 0" )

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 1_2

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
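
# Worked example (values assumed for illustration): borrowing 25000 at a
# 12% annual rate over 3 years gives a monthly rate of 0.01 and 36 payments,
# so the EMI is roughly 25000 * 0.01 * 1.01**36 / (1.01**36 - 1) ~= 830.36.
#
#   equated_monthly_installments(principal=25000, rate_per_annum=0.12, years_to_repay=3)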
| 346
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyV22ControlnetPipeline
    params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    batch_params = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    required_optional_params = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """return_dict""",
        """guidance_scale""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
"""simple docstring"""
return 32
@property
    def time_input_dim( self ):
"""simple docstring"""
return 32
@property
    def block_out_channels_0( self ):
"""simple docstring"""
return self.time_input_dim
@property
    def time_embed_dim( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
"""simple docstring"""
return 1_00
@property
    def dummy_unet( self ):
        """simple docstring"""
        torch.manual_seed(0 )

        model_kwargs = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs )
        return model
@property
    def dummy_movq_kwargs( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )

        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )

        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )

        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """hint""": hint,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_controlnet( self ):
        """simple docstring"""
        device = """cpu"""

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )

        pipe.set_progress_bar_config(disable=None )

        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6_959_826, 0.868_279, 0.7_558_092, 0.68_769_467, 0.85_805_804, 0.65_977_496, 0.44_885_302, 0.5_959_111, 0.4_251_595] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests ( unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet( self ):
        """simple docstring"""
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )

        hint = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/hint_image_cat.png""" )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )

        pipeline = KandinskyV22ControlnetPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )

        prompt = """A robot, 4k photo"""

        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()

        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=1_00 , output_type="""np""" , )

        image = output.images[0]

        assert image.shape == (5_12, 5_12, 3)

        assert_mean_pixel_difference(image , expected_image )
| 720
|
'''simple docstring'''
def miller_rabin( n : int , allow_probable : bool = False ) -> bool:
"""simple docstring"""
if n == 2:
return True
if not n % 2 or n < 2:
return False
if n > 5 and n % 10 not in (1, 3, 7, 9): # can quickly check last digit
return False
if n > 3_3170_4406_4679_8873_8596_1981 and not allow_probable:
raise ValueError(
"""Warning: upper bound of deterministic test is exceeded. """
"""Pass allow_probable=True to allow probabilistic test. """
"""A return value of True indicates a probable prime.""" )
# array bounds provided by analysis
    bounds = [
2047,
137_3653,
2532_6001,
32_1503_1751,
2_1523_0289_8747,
3_4747_4966_0383,
341_5500_7172_8321,
1,
382_5123_0565_4641_3051,
1,
1,
3186_6585_7834_0311_5116_7461,
3_3170_4406_4679_8873_8596_1981,
]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds , 1 ):
if n < _p:
# then we have our last prime to check
            plist = primes[:idx]
break
    d, s = n - 1, 0
# break up n -1 into a power of 2 (s) and
# remaining odd component
# essentially, solve for d * 2 ** s == n - 1
while d % 2 == 0:
d //= 2
s += 1
for prime in plist:
        pr = False
        for r in range(s ):
            m = pow(prime , d * 2**r , n )
# see article for analysis explanation for m
if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
# this loop will not determine compositeness
break
if pr:
continue
# if pr is False, then the above loop never evaluated to true,
# and the n MUST be composite
return False
return True
def test_miller_rabin() -> None:
"""simple docstring"""
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(83_8201 )
assert miller_rabin(83_8207 )
# 1_373_653
assert not miller_rabin(1731_6001 )
assert miller_rabin(1731_6017 )
# 25_326_001
assert not miller_rabin(30_7838_6641 )
assert miller_rabin(30_7838_6653 )
# 3_215_031_751
assert not miller_rabin(1_7130_4557_4801 )
assert miller_rabin(1_7130_4557_4819 )
# 2_152_302_898_747
assert not miller_rabin(2_7797_9972_8307 )
assert miller_rabin(2_7797_9972_8327 )
# 3_474_749_660_383
assert not miller_rabin(113_8500_2390_9441 )
assert miller_rabin(113_8500_2390_9527 )
# 341_550_071_728_321
assert not miller_rabin(127_5041_0188_4880_4351 )
assert miller_rabin(127_5041_0188_4880_4391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(796_6646_4458_5077_8779_1867 )
assert miller_rabin(796_6646_4458_5077_8779_1951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(5528_4067_7446_6478_9766_0333 )
assert miller_rabin(5528_4067_7446_6478_9766_0359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
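
# Usage sketch (example values taken from the assertions above): the test is
# deterministic below the documented bound; above it, pass allow_probable=True
# to accept a probabilistic answer.
#
#   miller_rabin(561)   # -> False (561 is a Carmichael number, composite)
#   miller_rabin(563)   # -> True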
| 499
| 0
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = 'RegNetConfig'

# Base docstring
_CHECKPOINT_FOR_DOC = 'facebook/regnet-y-040'
_EXPECTED_OUTPUT_SHAPE = [1, 1_088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'facebook/regnet-y-040'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tabby, tabby cat'

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'facebook/regnet-y-040',
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer( nn.Module ):
    """simple docstring"""
    def __init__( self ,in_channels ,out_channels ,kernel_size = 3 ,stride = 1 ,groups = 1 ,activation = "relu" ,) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels ,out_channels ,kernel_size=kernel_size ,stride=stride ,padding=kernel_size // 2 ,groups=groups ,bias=False ,)
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward( self ,hidden_state ) -> Tensor:
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetEmbeddings( nn.Module ):
    """simple docstring"""
    def __init__( self ,config ) -> None:
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels ,config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act )
        self.num_channels = config.num_channels

    def forward( self ,pixel_values ) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class RegNetShortCut( nn.Module ):
    """simple docstring"""
    def __init__( self ,in_channels ,out_channels ,stride = 2 ) -> None:
        super().__init__()
        self.convolution = nn.Conv2d(in_channels ,out_channels ,kernel_size=1 ,stride=stride ,bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self ,input ) -> Tensor:
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class RegNetSELayer( nn.Module ):
    """simple docstring"""
    def __init__( self ,in_channels ,reduced_channels ) -> None:
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels ,reduced_channels ,kernel_size=1 ) ,nn.ReLU() ,nn.Conv2d(reduced_channels ,in_channels ,kernel_size=1 ) ,nn.Sigmoid() ,)

    def forward( self ,hidden_state ) -> Tensor:
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state )
        attention = self.attention(pooled )
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer( nn.Module ):
    """simple docstring"""
    def __init__( self ,config ,in_channels ,out_channels ,stride = 1 ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 ,out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels ,out_channels ,stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels ,out_channels ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(out_channels ,out_channels ,stride=stride ,groups=groups ,activation=config.hidden_act ) ,RegNetConvLayer(out_channels ,out_channels ,kernel_size=1 ,activation=None ) ,)
        self.activation = ACT2FN[config.hidden_act]

    def forward( self ,hidden_state ) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetYLayer( nn.Module ):
    """simple docstring"""
    def __init__( self ,config ,in_channels ,out_channels ,stride = 1 ) -> None:
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 ,out_channels // config.groups_width )
        self.shortcut = (
            RegNetShortCut(in_channels ,out_channels ,stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels ,out_channels ,kernel_size=1 ,activation=config.hidden_act ) ,RegNetConvLayer(out_channels ,out_channels ,stride=stride ,groups=groups ,activation=config.hidden_act ) ,RegNetSELayer(out_channels ,reduced_channels=int(round(in_channels / 4 ) ) ) ,RegNetConvLayer(out_channels ,out_channels ,kernel_size=1 ,activation=None ) ,)
        self.activation = ACT2FN[config.hidden_act]

    def forward( self ,hidden_state ) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class RegNetStage( nn.Module ):
    """simple docstring"""
    def __init__( self ,config ,in_channels ,out_channels ,stride = 2 ,depth = 2 ,) -> None:
        super().__init__()
        layer = RegNetXLayer if config.layer_type == '''x''' else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config ,in_channels ,out_channels ,stride=stride ,) ,*[layer(config ,out_channels ,out_channels ) for _ in range(depth - 1 )] ,)

    def forward( self ,hidden_state ) -> Tensor:
        hidden_state = self.layers(hidden_state )
        return hidden_state
class RegNetEncoder( nn.Module ):
    """simple docstring"""
    def __init__( self ,config ) -> None:
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
        in_out_channels = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels ,config.depths[1:] ):
            self.stages.append(RegNetStage(config ,in_channels ,out_channels ,depth=depth ) )

    def forward( self ,hidden_state ,output_hidden_states = False ,return_dict = True ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state ,hidden_states=hidden_states )
class RegNetPreTrainedModel( PreTrainedModel ):
    """simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = '''regnet'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True

    def _init_weights( self ,module ) -> None:
        if isinstance(module ,nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' )
        elif isinstance(module ,(nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight ,1 )
            nn.init.constant_(module.bias ,0 )

    def _set_gradient_checkpointing( self ,module ,value=False ) -> None:
        if isinstance(module ,RegNetModel ):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = R'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
REGNET_INPUTS_DOCSTRING = R'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel( RegNetPreTrainedModel ):
    """simple docstring"""
    def __init__( self ,config ) -> None:
        super().__init__(config )
        self.config = config
        self.embedder = RegNetEmbeddings(config )
        self.encoder = RegNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=BaseModelOutputWithPoolingAndNoAttention ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def forward( self ,pixel_values ,output_hidden_states = None ,return_dict = None ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values )

        encoder_outputs = self.encoder(
            embedding_output ,output_hidden_states=output_hidden_states ,return_dict=return_dict )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state )

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state ,pooler_output=pooled_output ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
    ''' , REGNET_START_DOCSTRING , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification( RegNetPreTrainedModel ):
    """simple docstring"""
    def __init__( self ,config ) -> None:
        super().__init__(config )
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=ImageClassifierOutputWithNoAttention ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def forward( self ,pixel_values = None ,labels = None ,output_hidden_states = None ,return_dict = None ,) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values ,output_hidden_states=output_hidden_states ,return_dict=return_dict )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output )

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = '''regression'''
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = '''single_label_classification'''
                else:
                    self.config.problem_type = '''multi_label_classification'''

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() ,labels.squeeze() )
                else:
                    loss = loss_fct(logits ,labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits ,labels )

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss ,logits=logits ,hidden_states=outputs.hidden_states )
| 30
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'openai/imagegpt-small': '',
    'openai/imagegpt-medium': '',
    'openai/imagegpt-large': '',
}
class ImageGPTConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''imagegpt'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self ,vocab_size=512 + 1 ,n_positions=32 * 32 ,n_embd=512 ,n_layer=24 ,n_head=8 ,n_inner=None ,activation_function="quick_gelu" ,resid_pdrop=0.1 ,embd_pdrop=0.1 ,attn_pdrop=0.1 ,layer_norm_epsilon=1e-5 ,initializer_range=0.02 ,scale_attn_weights=True ,use_cache=True ,tie_word_embeddings=False ,scale_attn_by_inverse_layer_idx=False ,reorder_and_upcast_attn=False ,**kwargs ,) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings ,**kwargs )
class ImageGPTOnnxConfig( OnnxConfig ):
"""simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = -1 ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,) -> Mapping[str, Any]:
UpperCAmelCase_ : Any = self._generate_dummy_images(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = dict(preprocessor(images=_SCREAMING_SNAKE_CASE ,return_tensors=_SCREAMING_SNAKE_CASE ) )
return inputs
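
# Hedged usage sketch (editor addition): `attribute_map` above aliases the
# GPT-2 style names onto the common config names. This only runs inside the
# transformers package context, since the module uses relative imports.
_demo_cfg = ImageGPTConfig()
assert _demo_cfg.hidden_size == _demo_cfg.n_embd == 512
assert _demo_cfg.num_hidden_layers == _demo_cfg.n_layer == 24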
| 30
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    """
    XLM-RoBERTa tokenizer, adapted from RobertaTokenizer and XLNetTokenizer and based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>",
        pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
snake_case_ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
snake_case_ = 1
snake_case_ = len(self.sp_model ) + self.fairseq_offset
snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
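
# Hedged sketch (editor addition) of the fairseq/spm id alignment documented
# above: with fairseq_offset = 1, an ordinary spm piece id maps to spm_id + 1,
# while spm id 0 (spm's <unk>) maps to the fairseq <unk> id 3. The helper name
# `_demo_spm_to_fairseq` is ours.
def _demo_spm_to_fairseq(spm_id: int, fairseq_offset: int = 1, unk_token_id: int = 3) -> int:
    return spm_id + fairseq_offset if spm_id else unk_token_id

assert _demo_spm_to_fairseq(0) == 3  # spm <unk> -> fairseq <unk>
assert _demo_spm_to_fairseq(5) == 6  # ordinary piece, shifted by the offset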
| 139
|
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Tuple , *__lowercase : Optional[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : List[Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : Union[str, Any] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Dict , *__lowercase : int , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[str] , *__lowercase : Optional[int] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Dict , *__lowercase : str , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : List[str] , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : int , *__lowercase : List[Any] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : Optional[Any] , **__lowercase : int ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[Any] , *__lowercase : List[Any] , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[Any] , *__lowercase : Any , **__lowercase : Dict ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : int , *__lowercase : List[str] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : int , *__lowercase : Tuple , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : Dict , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : str , *__lowercase : Tuple , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[Any] , *__lowercase : int , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[str] , *__lowercase : List[str] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Any , *__lowercase : Any , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Any , *__lowercase : int , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : Any , **__lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Dict , *__lowercase : Dict , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Dict , *__lowercase : Union[str, Any] , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : List[str] , **__lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : Optional[int] , **__lowercase : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : List[Any] , *__lowercase : str , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : List[str] , **__lowercase : str ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Tuple , *__lowercase : int , **__lowercase : Any ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[Any] , *__lowercase : List[Any] , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : Union[str, Any] , **__lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Tuple , *__lowercase : Union[str, Any] , **__lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
class UpperCAmelCase ( metaclass=UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''sentencepiece''']
def __init__( self : Optional[int] , *__lowercase : str , **__lowercase : Tuple ):
"""simple docstring"""
requires_backends(self , ["sentencepiece"] )
| 139
| 1
|
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def snake_case_ ( self : List[str] ) -> int:
debug_launcher(test_script.main )
def snake_case_ ( self : Optional[Any] ) -> str:
debug_launcher(test_ops.main )
| 2
|
"""simple docstring"""
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class __a ( _lowerCAmelCase ):
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Optional[int] )-> Dict:
"""simple docstring"""
UpperCamelCase = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
UpperCamelCase = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCamelCase = bertabert.config.encoder.vocab_size
UpperCamelCase = tokenizer.sep_token_id
UpperCamelCase = tokenizer.cls_token_id
UpperCamelCase = 128
UpperCamelCase = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
UpperCamelCase = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
UpperCamelCase = train_dataset.select(range(32 ) )
UpperCamelCase = val_dataset.select(range(16 ) )
UpperCamelCase = 4
def _map_to_encoder_decoder_inputs(UpperCAmelCase_ : Any ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCamelCase = tokenizer(batch["article"] , padding="max_length" , truncation=UpperCAmelCase_ , max_length=512 )
UpperCamelCase = tokenizer(batch["highlights"] , padding="max_length" , truncation=UpperCAmelCase_ , max_length=128 )
UpperCamelCase = inputs.input_ids
UpperCamelCase = inputs.attention_mask
UpperCamelCase = outputs.input_ids
UpperCamelCase = outputs.input_ids.copy()
UpperCamelCase = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
UpperCamelCase = outputs.attention_mask
assert all(len(UpperCAmelCase_ ) == 512 for x in inputs.input_ids )
assert all(len(UpperCAmelCase_ ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(UpperCAmelCase_ : Any ):
UpperCamelCase = pred.label_ids
UpperCamelCase = pred.predictions
# all unnecessary tokens are removed
UpperCamelCase = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
UpperCamelCase = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
UpperCamelCase = sum([int(pred_str[i] == label_str[i] ) for i in range(len(UpperCAmelCase_ ) )] ) / len(UpperCAmelCase_ )
return {"accuracy": accuracy}
# map train dataset
UpperCamelCase = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
UpperCamelCase = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=UpperCAmelCase_ , batch_size=UpperCAmelCase_ , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
UpperCamelCase = self.get_auto_remove_tmp_dir()
UpperCamelCase = SeqaSeqTrainingArguments(
output_dir=UpperCAmelCase_ , per_device_train_batch_size=UpperCAmelCase_ , per_device_eval_batch_size=UpperCAmelCase_ , predict_with_generate=UpperCAmelCase_ , evaluation_strategy="steps" , do_train=UpperCAmelCase_ , do_eval=UpperCAmelCase_ , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCamelCase = SeqaSeqTrainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , compute_metrics=_compute_metrics , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , )
# start training
trainer.train()
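
# Hedged sketch (editor addition) of the label masking used in
# `_map_to_encoder_decoder_inputs` above: padding positions become -100 so
# that CrossEntropyLoss, whose default ignore_index is -100, skips them.
# `_demo_pad` is our assumption for the pad token id.
_demo_pad = 0
_demo_labels = [[5, 9, _demo_pad, _demo_pad]]
_demo_masked = [[-100 if tok == _demo_pad else tok for tok in seq] for seq in _demo_labels]
assert _demo_masked == [[5, 9, -100, -100]]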
| 554
| 0
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ ( _snake_case):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
def __A ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = DebertaVaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : int = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = DebertaVaForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.num_labels
SCREAMING_SNAKE_CASE : Union[str, Any] = DebertaVaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.num_labels
SCREAMING_SNAKE_CASE : Tuple = DebertaVaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = DebertaVaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Tuple = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = DebertaVaForMultipleChoice(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE : List[Any] = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowercase__ ( _snake_case , _snake_case , unittest.TestCase):
UpperCamelCase_ = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = True
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def __A ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = DebertaVaModelTester(self )
SCREAMING_SNAKE_CASE : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def __A ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCamelCase__ )
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase__ )
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase__ )
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase__ )
def __A ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*UpperCamelCase__ )
@slow
def __A ( self : Tuple ):
'''simple docstring'''
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Any = DebertaVaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ ( unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
def __A ( self : Dict ):
'''simple docstring'''
pass
@slow
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = DebertaVaModel.from_pretrained('''microsoft/deberta-v2-xlarge''' )
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 713
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""")) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""")
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
])
class lowercase__ ( unittest.TestCase):
def __A ( self : Any ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase__ , )
assert hasattr(self , '''env''' )
def __A ( self : str , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
# distributed data settings
SCREAMING_SNAKE_CASE : Any = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version='''py36''' , )
def __A ( self : Optional[Any] , UpperCamelCase__ : List[str] ):
'''simple docstring'''
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def __A ( self : Tuple , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
SCREAMING_SNAKE_CASE : Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
SCREAMING_SNAKE_CASE : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
SCREAMING_SNAKE_CASE : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
SCREAMING_SNAKE_CASE : List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase__ )
| 34
| 0
|
"""simple docstring"""
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
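
# Hedged demo (editor addition) of the duplicate detection used inside
# `analyze_results` above; pure Python, no extra assumptions.
assert [k for k, v in collections.Counter(["A", "B", "A"]).items() if v > 1] == ["A"]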
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
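
# Hedged demo (editor addition) of `find_backend` on typical init lines;
# relies only on the regexes defined above.
assert find_backend("if not is_torch_available():") == "torch"
assert find_backend("import os") is None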
| 264
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__UpperCAmelCase : List[Any] = pytest.mark.integration
@require_faiss
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset
    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
def snake_case_ ( self : Optional[Any] ) -> str:
import faiss
_a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
_a , _a : int = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def snake_case_ ( self : List[Any] ) -> List[str]:
import faiss
_a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__snake_case ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
_a , _a : Dict = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def snake_case_ ( self : Dict ) -> int:
_a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(__snake_case , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def snake_case_ ( self : List[str] ) -> Dict:
from elasticsearch import Elasticsearch
_a : Dataset = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_a : int = {'''acknowledged''': True}
            mocked_bulk.return_value = [(True, None)] * 30
_a : Dict = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
_a : List[Any] = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=__snake_case )
_a , _a : Any = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
def snake_case_ ( self : str ) -> Any:
import faiss
_a : List[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
_a : Union[str, Any] = np.zeros(5 , dtype=np.floataa )
_a : Optional[Any] = 1
_a , _a : Optional[int] = index.search(__snake_case )
self.assertRaises(__snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
_a : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1]
_a , _a : Any = index.search_batch(__snake_case )
self.assertRaises(__snake_case , index.search_batch , queries[0] )
_a : Dict = [scores[0] for scores in total_scores]
_a : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __snake_case )
def snake_case_ ( self : List[str] ) -> int:
import faiss
_a : List[str] = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
_a : Union[str, Any] = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__snake_case ):
_a : Optional[Any] = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def snake_case_ ( self : Union[str, Any] ) -> Union[str, Any]:
import faiss
_a : Tuple = faiss.IndexFlat(5 )
_a : Optional[Any] = FaissIndex(custom_index=__snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def snake_case_ ( self : Union[str, Any] ) -> Tuple:
import faiss
_a : List[str] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__snake_case ) as tmp_file:
index.save(tmp_file.name )
_a : int = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
_a : List[Any] = np.zeros(5 , dtype=np.floataa )
_a : List[Any] = 1
_a , _a : List[str] = index.search(__snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
def snake_case_ ( self : Any ) -> str:
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
_a : List[Any] = Elasticsearch()
_a : int = {'''acknowledged''': True}
_a : Tuple = ElasticSearchIndex(es_client=__snake_case )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
_a : Any = '''foo'''
_a : Union[str, Any] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_a , _a : Union[str, Any] = index.search(__snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
_a : List[str] = '''foo'''
_a : Optional[int] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
_a , _a : str = index.search(__snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
_a : Union[str, Any] = ['''foo''', '''bar''', '''foobar''']
_a : int = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_a , _a : Any = index.search_batch(__snake_case )
_a : Optional[Any] = [scores[0] for scores in total_scores]
_a : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , __snake_case )
# batched queries with timeout
_a : Any = ['''foo''', '''bar''', '''foobar''']
_a : List[str] = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
_a , _a : List[str] = index.search_batch(__snake_case , request_timeout=30 )
_a : Optional[Any] = [scores[0] for scores in total_scores]
_a : str = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , __snake_case )
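
# Minimal usage sketch (illustrative, mirroring what the tests above exercise):
# a FaissIndex wraps a faiss index and exposes add_vectors / search / save / load.
#
#     index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
#     index.add_vectors(np.eye(5, dtype=np.float32))    # five one-hot "documents"
#     scores, indices = index.search(np.eye(5, dtype=np.float32)[1])
#     assert indices[0] == 1                            # nearest neighbour of e1 is e1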
| 471
| 0
|
'''simple docstring'''
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by repeatedly exchanging out-of-order pairs."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                # swap the two elements
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(exchange_sort(unsorted))
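
# Quick sanity check (illustrative): exchange sort always performs O(n^2)
# comparisons, so it is only appropriate for small inputs.
#
#     >>> exchange_sort([5, 4, 3, 2, 1])
#     [1, 2, 3, 4, 5]
#     >>> exchange_sort([-1, 7, 0, 7])
#     [-1, 0, 7, 7]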
| 606
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that removes the usage line from the help message for subcommands."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
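
# Illustrative interactive flow (what the helpers above are used for inside
# `accelerate config`); the prompts shown here are examples, not the exact ones:
#
#     compute_env = _ask_options(
#         "In which compute environment are you running?",
#         ["This machine", "AWS (Amazon SageMaker)"],
#         _convert_compute_environment,
#     )
#     use_cpu = _ask_field(
#         "Do you want to run on CPU only? [yes/NO]: ",
#         _convert_yes_no_to_bool,
#         default=False,
#         error_message="Please enter yes or no.",
#     )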
| 606
| 1
|
'''simple docstring'''
def mf_knapsack(i, wt, val, j):
    """
    Memory-function (top-down) knapsack: only the subproblems that are actually
    needed are solved. f is a global 2D table pre-filled with -1s.
    """
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            cur_val = mf_knapsack(i - 1, wt, val, j)
        else:
            cur_val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = cur_val
    return f[i][j]


def knapsack(w, wt, val, n):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w, wt, val):
    """
    Solves the integer-weights knapsack problem and returns one of the several
    possible optimal subsets (as a set of 1-based item indices).
    """
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp, wt, i, j, optimal_set):
    # for item i at maximum weight j to be part of an optimal subset, the optimal
    # value at (i, j) must be greater than the optimal value at (i - 1, j),
    # i.e. than the value obtained considering only the previous items
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
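
# Another illustrative run (values assumed for the example): with capacity 10,
# weights [5, 4, 6, 3] and values [10, 40, 30, 50], the optimum is 90, achieved
# by items {2, 4} (1-based), since weights 4 + 3 <= 10 and values 40 + 50 = 90.
#
#     optimal_value, subset = knapsack_with_example_solution(10, [5, 4, 6, 3], [10, 40, 30, 50])
#     assert (optimal_value, subset) == (90, {2, 4})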
| 168
|
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that an edge exists between the current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False

    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    # initialize path with -1, indicating vertices that have not been visited yet
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
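
# Illustrative usage (adjacency matrix assumed for the example): this 5-vertex
# graph contains the Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
#
#     graph = [
#         [0, 1, 0, 1, 0],
#         [1, 0, 1, 1, 1],
#         [0, 1, 0, 0, 1],
#         [1, 1, 0, 0, 1],
#         [0, 1, 1, 1, 0],
#     ]
#     hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]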
| 88
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
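
# Minimal usage sketch (the audio array is a stand-in; the Whisper checkpoint is
# downloaded on first use):
#
#     import numpy as np
#     tool = SpeechToTextTool()
#     text = tool(np.zeros(16_000, dtype=np.float32))  # one second of silence at 16 kHz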
| 339
|
'''simple docstring'''
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """
    returns the list containing all the possible
    combinations a string (`target`) can be constructed from
    the given list of substrings (`word_bank`)
    """
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1

    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value
    table[0] = [[]]  # because empty string has empty combination

    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
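
# For the first call above, the two recoverable decompositions of "jwajalapa"
# are ["jwa", "j", "a", "lapa"] and ["j", "w", "a", "j", "a", "lapa"]. Filling
# the table takes roughly O(len(target) * len(word_bank) * max_word_len) steps,
# though the number of combinations themselves can grow exponentially.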
| 339
| 1
|
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 323
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 230
| 0
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add arguments to parser for functions defined in quant_trainer."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )
def set_default_quantizers(args):
    """Set default quantizers before creating the model."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval_model=False):
    """Function called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

    if args.clip_gelu:
        clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)
def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")
def finish_calibration(model, args):
    """Disable calibration and load amax for all *_quantizer modules in the model."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    """Adjust quantization ranges to match an implementation where Q, K and V are computed by a single fused GEMM:
    force the scale factors of the Q/K/V quantizers to the max of the three.
    """

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    """Clip activations generated by GELU to maxval when quantized,
    by adjusting the amax of the following input quantizer.
    """
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")
def expand_amax(model):
    """Expand per-tensor amax to be per channel, where each channel is assigned the per-tensor amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")
def recalibrate_weights(model):
    """Performs max calibration on the weights and updates amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print model quantization configuration."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f"{' ':{name_width}} {wgt_str}")
def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of mod's quantizer to v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")
def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name contains a substring in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
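
# Typical calibration flow built from the helpers above (sketch only; `args`
# comes from `add_arguments`, while `build_model` and `run_calibration_batches`
# are hypothetical stand-ins for the caller's own code):
#
#     set_default_quantizers(args)          # before the model is created
#     model = build_model(...)              # model built with quant_nn layers
#     configure_model(model, args, calib=True)
#     enable_calibration(model)
#     run_calibration_batches(model)        # a few forward passes
#     finish_calibration(model, args)       # load amax, re-enable quantization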
| 94
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

NAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
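
# Usage sketch (mirrors the defaults above):
#
#     config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
#     config.hidden_size  # 512, i.e. 64 * 2 ** 3, the channel dim after the last stage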
| 94
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __A (unittest.TestCase ):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def _snake_case ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 168
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
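
# Example invocation (script name assumed): saves the raw result page to
# project1a.html and opens the first five result links in the browser. Note
# that the ".eZt8xd" CSS class is Google markup that may change at any time.
#
#     python crawl_google_results.py "python list comprehension"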
| 325
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    """Wraps a Pix2Struct image processor and a T5 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2_048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
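
# Usage sketch (checkpoint name illustrative; any Pix2Struct checkpoint on the
# Hub should work):
#
#     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
#     inputs = processor(images=image, text="a caption prefix", return_tensors="pt")
#     # -> flattened patch features plus decoder_input_ids / decoder_attention_mask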
| 705
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path=None, config_path=None):
    """Copy/paste/tweak the original BLIP weights into the transformers design."""
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
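
# Example invocation (file name assumed; requires the BLIP repo on PYTHONPATH
# for the `models.*` imports above, and downloads the original checkpoints):
#
#     python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-hf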
| 150
| 0
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A_ :
"""simple docstring"""
def __init__( self : str ,__A : Any ,__A : Optional[int]=3 ,__A : List[str]=32 ,__A : Optional[int]=3 ,__A : Optional[Any]=10 ,__A : Any=[8, 16, 32, 64] ,__A : Optional[int]=[1, 1, 2, 1] ,__A : int=True ,__A : Dict=True ,__A : List[str]="relu" ,__A : List[Any]=3 ,__A : Optional[Any]=None ,__A : Any=["stage2", "stage3", "stage4"] ,__A : str=[2, 3, 4] ,__A : Optional[int]=1 ,) -> List[Any]:
_lowercase = parent
_lowercase = batch_size
_lowercase = image_size
_lowercase = num_channels
_lowercase = embeddings_size
_lowercase = hidden_sizes
_lowercase = depths
_lowercase = is_training
_lowercase = use_labels
_lowercase = hidden_act
_lowercase = num_labels
_lowercase = scope
_lowercase = len(__A )
_lowercase = out_features
_lowercase = out_indices
_lowercase = num_groups
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
_lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase = None
if self.use_labels:
_lowercase = ids_tensor([self.batch_size] ,self.num_labels )
_lowercase = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
return BitConfig(
num_channels=self.num_channels ,embeddings_size=self.embeddings_size ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,hidden_act=self.hidden_act ,num_labels=self.num_labels ,out_features=self.out_features ,out_indices=self.out_indices ,num_groups=self.num_groups ,)
def __UpperCAmelCase ( self : List[str] ,__A : int ,__A : Union[str, Any] ,__A : int ) -> List[Any]:
_lowercase = BitModel(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __UpperCAmelCase ( self : List[str] ,__A : List[Any] ,__A : Optional[Any] ,__A : str ) -> Union[str, Any]:
_lowercase = self.num_labels
_lowercase = BitForImageClassification(__A )
model.to(__A )
model.eval()
_lowercase = model(__A ,labels=__A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self : str ,__A : List[str] ,__A : Optional[Any] ,__A : int ) -> List[Any]:
_lowercase = BitBackbone(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_lowercase = None
_lowercase = BitBackbone(config=__A )
model.to(__A )
model.eval()
_lowercase = model(__A )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase = config_and_inputs
_lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A_ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Dict = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ : Union[str, Any] = False
SCREAMING_SNAKE_CASE_ : List[Any] = False
SCREAMING_SNAKE_CASE_ : Dict = False
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Any = False
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
_lowercase = BitModelTester(self )
_lowercase = ConfigTester(self ,config_class=__A ,has_text_modality=__A )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
return
@unittest.skip(reason='Bit does not output attentions' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip(reason='Bit does not use inputs_embeds' )
def __UpperCAmelCase ( self : Optional[Any] ) -> Dict:
pass
@unittest.skip(reason='Bit does not support input and output embeddings' )
def __UpperCAmelCase ( self : str ) -> Dict:
pass
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(__A )
_lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase = [*signature.parameters.keys()]
_lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __UpperCAmelCase ( self : int ) -> List[str]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__A )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase = model_class(config=__A )
for name, module in model.named_modules():
if isinstance(__A ,(nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
self.assertTrue(
torch.all(module.bias == 0 ) ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
def __UpperCAmelCase ( self : Any ) -> Optional[int]:
def check_hidden_states_output(__A : str ,__A : List[str] ,__A : int ):
_lowercase = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
_lowercase = model(**self._prepare_for_class(__A ,__A ) )
_lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowercase = self.model_tester.num_stages
self.assertEqual(len(__A ) ,expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_lowercase , _lowercase = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase = ['preactivation', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
_lowercase = layer_type
_lowercase = True
check_hidden_states_output(__A ,__A ,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase = True
check_hidden_states_output(__A ,__A ,__A )
@unittest.skip(reason='Bit does not use feedforward chunking' )
def __UpperCAmelCase ( self : Dict ) -> Tuple:
pass
def __UpperCAmelCase ( self : Any ) -> List[Any]:
_lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase = BitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
_lowercase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __UpperCAmelCase ( self : Tuple ) -> Dict:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def __UpperCAmelCase ( self : str ) -> Any:
_lowercase = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__A )
_lowercase = self.default_image_processor
_lowercase = prepare_img()
_lowercase = image_processor(images=__A ,return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
_lowercase = model(**__A )
# verify the logits
_lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,__A )
_lowercase = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__A ,atol=1e-4 ) )
@require_torch
class A_ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = (BitBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ : Optional[Any] = BitConfig
SCREAMING_SNAKE_CASE_ : Optional[int] = False
def __UpperCAmelCase ( self : List[Any] ) -> Tuple:
_lowercase = BitModelTester(self )
| 67
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67
| 1
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'''dataset_info''' , [
DatasetInfo(),
DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def __snake_case ():
"""simple docstring"""
    dataset_info = DatasetInfo(
        description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict )
    reloaded = yaml.safe_load(dataset_info_yaml )
    assert dataset_info_yaml_dict == reloaded
def __snake_case ():
"""simple docstring"""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'''dataset_infos_dict''' , [
DatasetInfosDict(),
DatasetInfosDict({'''default''': DatasetInfo()} ),
DatasetInfosDict({'''my_config_name''': DatasetInfo()} ),
DatasetInfosDict(
{
'''default''': DatasetInfo(
description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'''v1''': DatasetInfo(dataset_size=42 ),
'''v2''': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def __snake_case (dataset_infos_dict , tmp_path ):
    """simple docstring"""
    tmp_path_str = str(tmp_path )
    dataset_infos_dict.write_to_directory(tmp_path_str )
    reloaded = DatasetInfosDict.from_directory(tmp_path_str )
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path_str , '''README.md''' ) )
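# For reference, a sketch of the README front matter these tests round-trip (see the
# "full:README.md" fixture above):
#
#   ---
#   dataset_info:
#     dataset_size: 42
#   ---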
| 705
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    # each rank contributes the values rank*n + 1 ... rank*n + n (n = num_processes)
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)
if __name__ == "__main__":
main()
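# Usage sketch (command line, not executed here): these collectives only do something
# interesting with more than one process, e.g.
#
#   accelerate launch --num_processes 2 this_script.py
#
# With num_processes=2, create_tensor yields [1., 2.] on rank 0 and [3., 4.] on rank 1,
# which is exactly what the reduce_sum/reduce_mean truth tensors above encode.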
| 418
| 0
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ : Tuple =logging.get_logger(__name__)
lowerCAmelCase__ : List[str] ={
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = 'detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=['stage4'] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads( self ):
        return self.encoder_attention_heads

    @property
    def hidden_size( self ):
        return self.d_model

    @classmethod
    def from_backbone_config( cls , backbone_config , **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )

    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ('pixel_mask', {0: 'batch'}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-5

    @property
    def default_onnx_opset( self ) -> int:
        return 12
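# A minimal usage sketch (assumes the transformers package is installed; illustrative only):
#
#   from transformers import DetrConfig
#   config = DetrConfig(num_queries=50, encoder_layers=2, decoder_layers=2)
#   assert config.to_dict()["model_type"] == "detr"   # nested backbone config is serialized too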
| 148
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    '''simple docstring'''

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , embed_dim=16 , word_embed_proj_dim=16 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        input_ids = tf.concat([input_ids, eos_tensor] , axis=1 )
        config = self.config_cls(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=False , **self.config_updates , )
        inputs_dict = prepare_opt_inputs_dict(config , input_ids )
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self , config , inputs_dict ):
        model = TFOPTModel(config=config )
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.int8 )
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens] , axis=-1 )
        next_attention_mask = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )[0]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[0]
        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
        # select random slice
        random_slice_idx = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice , output_from_past_slice , rtol=1e-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OPTConfig )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model , embedding_layer ):
            if hasattr(embedding_layer , "weight" ):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer , "weight" ):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config )
                old_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                old_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # reshape the embeddings
                model.resize_token_embeddings(size )
                new_input_embeddings = _get_word_embedding_weight(model , model.get_input_embeddings() )
                new_output_embeddings = _get_word_embedding_weight(model , model.get_output_embeddings() )
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0] , assert_size )
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                        models_equal = False
                self.assertTrue(models_equal )
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0] , assert_size )
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2 ) ) > 0:
                            models_equal = False
                    self.assertTrue(models_equal )
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1) , dtype=tf.int32 ) * 2
        input_ids = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m" )
        input_ids = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
        attention_mask = tf.not_equal(input_ids , model.config.pad_token_id )
        with tf.GradientTape():
            output = model(input_ids=input_ids , attention_mask=attention_mask ).last_hidden_state
        expected_shape = (1, 11, 5_12)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-3 ) )
        xla_generate = tf.function(model , jit_compile=True )
        output = xla_generate(input_ids , attention_mask )[0]
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=4e-2 ) )
@require_tf
@slow
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model )
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model )
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts , return_tensors="tf" , padding=True , add_special_tokens=False )
        logits = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
        xla_generate = tf.function(model , jit_compile=True )
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
        self.assertTrue(np.allclose(logits , logits_meta , atol=1e-4 ) )
@require_tf
@slow
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
@property
    def prompts(self):
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="tf" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences , return_tensors="tf" , padding=True )
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs["attention_mask"] )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1] , tf.int64 ) )
        inputs_padded = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id )
        model = TFOPTForCausalLM.from_pretrained(model_id )
        for prompt in self.prompts:
            input_ids = tokenizer(prompt , return_tensors="tf" ).input_ids
            generated_ids = model.generate(input_ids , max_length=10 )
            generated_string = tokenizer.batch_decode(generated_ids , skip_special_tokens=True )
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs , EXPECTED_OUTPUTS )
| 584
| 0
|
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0))
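# Quick sanity check (a sketch added for illustration): a 90-degree arc of a circle of
# radius 10 is a quarter of the circumference, i.e. 5 * pi.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9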
| 715
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, '''html.parser''')
    div = soup.find('''div''', attrs={'''class''': '''gs_ri'''})
    anchors = div.find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 3_0,
"""pages""": """3979-3990""",
"""year""": 2_0_1_8,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
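# Offline sketch of the same parsing logic on hypothetical markup (the real Google
# Scholar class names `gs_ri`/`gs_fl` may change at any time, so this only illustrates
# the BeautifulSoup calls used above):
_demo_html = '<div class="gs_ri"><div class="gs_fl"><a>a</a><a>b</a><a>Cited by 123</a></div></div>'
_demo_soup = BeautifulSoup(_demo_html, '''html.parser''')
_demo_anchors = _demo_soup.find('''div''', attrs={'''class''': '''gs_ri'''}).find('''div''', attrs={'''class''': '''gs_fl'''}).find_all('''a''')
assert _demo_anchors[2].get_text() == 'Cited by 123'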
| 474
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if the given number is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """Return the number together with all of its left and right truncations."""
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums


def validate(n: int) -> bool:
    """Cheap pre-filter: the leading and trailing three digits must themselves be prime."""
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """Collect the first `count` truncatable primes."""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes


def solution() -> int:
    """Sum of the only eleven truncatable primes (Project Euler 37)."""
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(f"{sum(compute_truncated_primes(11)) = }")
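# Sanity sketch (added for illustration): 23 is the smallest truncatable prime
# (single-digit primes are excluded by the problem), so it should head the list.
assert compute_truncated_primes(1) == [23]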
| 436
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    '''simple docstring'''

    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__( self , vocab_size=3_2128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    '''simple docstring'''

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
        return common_inputs

    @property
    def default_onnx_opset( self ) -> int:
        return 13
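# A small sketch of the feed_forward_proj parsing above (assumes the transformers
# package is installed; illustrative only):
#
#   from transformers import T5Config
#   c = T5Config(feed_forward_proj="gated-gelu")
#   assert c.is_gated_act and c.dense_act_fn == "gelu_new"   # "gated-gelu" is remapped
#   assert not T5Config(feed_forward_proj="relu").is_gated_act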
| 436
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase = logging.get_logger(__name__)
PATTERNS = [
    ['''attention''', '''attn'''],
    ['''encoder_attention''', '''encoder_attn'''],
    ['''q_lin''', '''q_proj'''],
    ['''k_lin''', '''k_proj'''],
    ['''v_lin''', '''v_proj'''],
    ['''out_lin''', '''out_proj'''],
    ['''norm_embeddings''', '''layernorm_embedding'''],
    ['''position_embeddings''', '''embed_positions'''],
    ['''embeddings''', '''embed_tokens'''],
    ['''ffn.lin''', '''fc'''],
]


def rename_state_dict_key(k):
    """simple docstring"""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("""encoder""" ):
        k = k.replace(""".attn""" , """.self_attn""" )
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """final_layer_norm""" )
    elif k.startswith("""decoder""" ):
        k = k.replace("""norm1""" , """self_attn_layer_norm""" )
        k = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
        k = k.replace("""norm3""" , """final_layer_norm""" )
    return k


def rename_layernorm_keys(sd):
    """simple docstring"""
    keys = [
        """model.encoder.layernorm_embedding.weight""",
        """model.encoder.layernorm_embedding.bias""",
        """model.decoder.layernorm_embedding.weight""",
        """model.decoder.layernorm_embedding.bias""",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("""layernorm_embedding""" , """layer_norm""" )
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ['''START''']


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    """simple docstring"""
    model = torch.load(checkpoint_path , map_location="""cpu""" )
    sd = model["""model"""]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''')
parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''')
parser.add_argument(
'''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use'''
)
_lowerCamelCase = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
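# Usage sketch (paths are placeholders, not real files):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json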
| 321
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
"""kwargs, expected""" , [
({"""num_shards""": 0, """max_num_jobs""": 1}, []),
({"""num_shards""": 10, """max_num_jobs""": 1}, [range(10 )]),
({"""num_shards""": 10, """max_num_jobs""": 10}, [range(UpperCAmelCase_ , i + 1 ) for i in range(10 )]),
({"""num_shards""": 1, """max_num_jobs""": 10}, [range(1 )]),
({"""num_shards""": 10, """max_num_jobs""": 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({"""num_shards""": 3, """max_num_jobs""": 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs , expected )-> int:
    """simple docstring"""
    out = _distribute_shards(**kwargs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, max_num_jobs, expected""" , [
({"""foo""": 0}, 10, [{"""foo""": 0}]),
({"""shards""": [0, 1, 2, 3]}, 1, [{"""shards""": [0, 1, 2, 3]}]),
({"""shards""": [0, 1, 2, 3]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}, {"""shards""": [2]}, {"""shards""": [3]}]),
({"""shards""": [0, 1]}, 4, [{"""shards""": [0]}, {"""shards""": [1]}]),
({"""shards""": [0, 1, 2, 3]}, 2, [{"""shards""": [0, 1]}, {"""shards""": [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs , max_num_jobs , expected )-> Any:
    """simple docstring"""
    out = _split_gen_kwargs(gen_kwargs , max_num_jobs )
    assert out == expected
@pytest.mark.parametrize(
"""gen_kwargs, expected""" , [
({"""foo""": 0}, 1),
({"""shards""": [0]}, 1),
({"""shards""": [0, 1, 2, 3]}, 4),
({"""shards""": [0, 1, 2, 3], """foo""": 0}, 4),
({"""shards""": [0, 1, 2, 3], """other""": (0, 1)}, 4),
({"""shards""": [0, 1, 2, 3], """shards2""": [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs , expected )-> int:
    """simple docstring"""
    if expected is RuntimeError:
        with pytest.raises(expected ):
            _number_of_shards_in_gen_kwargs(gen_kwargs )
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs )
        assert out == expected
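# A runnable sketch of the contract the first parametrization pins down: shards are
# split into contiguous, near-equal chunks across at most `max_num_jobs` jobs.
assert _distribute_shards(num_shards=10, max_num_jobs=3) == [range(0, 4), range(4, 7), range(7, 10)]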
| 321
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double-copied in the original FLAVA checkpoint, so skip them
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )


def upgrade_state_dict(state_dict , codebook_state_dict ):
    upgrade = {}
    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace('heads.cmd.mim_head.cls.predictions' , 'mmm_image_head' )
        key = key.replace('heads.cmd.mlm_head.cls.predictions' , 'mmm_text_head' )
        key = key.replace('heads.cmd.itm_head.cls' , 'itm_head' )
        key = key.replace('heads.cmd.itm_head.pooler' , 'itm_head.pooler' )
        key = key.replace('heads.cmd.clip_head.logit_scale' , 'flava.logit_scale' )
        key = key.replace('heads.fairseq_mlm.cls.predictions' , 'mlm_head' )
        key = key.replace('heads.imagenet.mim_head.cls.predictions' , 'mim_head' )
        key = key.replace('mm_text_projection' , 'flava.text_to_mm_projection' )
        key = key.replace('mm_image_projection' , 'flava.image_to_mm_projection' )
        key = key.replace('image_encoder.module' , 'flava.image_model' )
        key = key.replace('text_encoder.module' , 'flava.text_model' )
        key = key.replace('mm_encoder.module.encoder.cls_token' , 'flava.multimodal_model.cls_token' )
        key = key.replace('mm_encoder.module' , 'flava.multimodal_model' )
        key = key.replace('text_projection' , 'flava.text_projection' )
        key = key.replace('image_projection' , 'flava.image_projection' )
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path , codebook_path , pytorch_dump_folder_path , config_path=None ):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path )
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config ).eval()
    codebook_state_dict = convert_dalle_checkpoint(codebook_path , None , save_checkpoint=False )
    if os.path.exists(checkpoint_path ):
        state_dict = torch.load(checkpoint_path , map_location='cpu' )
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path , map_location='cpu' )
    hf_state_dict = upgrade_state_dict(state_dict , codebook_state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_count = count_parameters(hf_model.state_dict() )
    state_dict_count = count_parameters(state_dict ) + count_parameters(codebook_state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A__ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
A__ : Tuple = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 13
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'add_prefix_space': True}
    test_seq2seq = False
    def setUp( self : List[str] ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )

    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text , add_prefix_space=True )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def lowerCamelCase_ ( self : Any ):
if not self.test_rust_tokenizer:
return
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = "lower newer"
# Testing tokenization
_lowerCamelCase : List[Any] = tokenizer.tokenize(__A,add_prefix_space=__A )
_lowerCamelCase : str = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids without special tokens
_lowerCamelCase : str = tokenizer.encode(__A,add_special_tokens=__A,add_prefix_space=__A )
_lowerCamelCase : List[str] = rust_tokenizer.encode(__A,add_special_tokens=__A )
self.assertListEqual(__A,__A )
# Testing conversion to ids with special tokens
_lowerCamelCase : List[Any] = self.get_rust_tokenizer(add_prefix_space=__A )
_lowerCamelCase : Union[str, Any] = tokenizer.encode(__A,add_prefix_space=__A )
_lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A,__A )
# Testing the unknown token
_lowerCamelCase : Optional[int] = tokens + [rust_tokenizer.unk_token]
_lowerCamelCase : Optional[Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__A ),__A )
def lowerCamelCase_ ( self : Tuple,*__A : Any,**__A : Any ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowerCamelCase_ ( self : int,__A : Optional[int]=1_5 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(__A,**__A )
# Simple input
_lowerCamelCase : Dict = "This is a simple input"
_lowerCamelCase : Any = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Tuple = ("This is a simple input", "This is a pair")
_lowerCamelCase : Tuple = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Simple input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
# Pair input
self.assertRaises(__A,tokenizer_r.encode,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(__A,tokenizer_r.encode_plus,__A,max_length=__A,padding="max_length" )
# Pair input
self.assertRaises(
__A,tokenizer_r.batch_encode_plus,__A,max_length=__A,padding="max_length",)
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : str = CodeGenTokenizer.from_pretrained(self.tmpdirname,pad_token="<pad>" )
# Simple input
_lowerCamelCase : Tuple = "This is a simple input"
_lowerCamelCase : Dict = ["This is a simple input looooooooong", "This is a simple input"]
_lowerCamelCase : Dict = ("This is a simple input", "This is a pair")
_lowerCamelCase : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
_lowerCamelCase : Dict = tokenizer.pad_token_id
_lowerCamelCase : Dict = tokenizer(__A,padding="max_length",max_length=3_0,return_tensors="np" )
_lowerCamelCase : int = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
_lowerCamelCase : List[Any] = tokenizer(*__A,padding="max_length",max_length=6_0,return_tensors="np" )
_lowerCamelCase : Tuple = tokenizer(__A,padding=__A,truncate=__A,return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1],3_0 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1],3_3 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1],6_0 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1],5_2 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : List[Any] = "$$$"
_lowerCamelCase : Tuple = CodeGenTokenizer.from_pretrained(self.tmpdirname,bos_token=__A,add_bos_token=__A )
_lowerCamelCase : List[str] = "This is a simple input"
_lowerCamelCase : Optional[Any] = ["This is a simple input 1", "This is a simple input 2"]
_lowerCamelCase : Union[str, Any] = tokenizer.bos_token_id
_lowerCamelCase : Any = tokenizer(__A )
_lowerCamelCase : List[str] = tokenizer(__A )
self.assertEqual(out_s.input_ids[0],__A )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
_lowerCamelCase : int = tokenizer.decode(out_s.input_ids )
_lowerCamelCase : str = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0],__A )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : int = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono" )
_lowerCamelCase : Optional[Any] = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
_lowerCamelCase : Dict = "\nif len_a > len_b: result = a\nelse: result = b"
_lowerCamelCase : Any = tokenizer.encode(__A )
_lowerCamelCase : str = ["^#", re.escape("<|endoftext|>" ), "^'''", "^\"\"\"", "\n\n\n"]
_lowerCamelCase : List[Any] = tokenizer.decode(__A,truncate_before_pattern=__A )
self.assertEqual(__A,__A )
def lowerCamelCase_ ( self : Any ):
pass
| 44
| 0
|
import os
import sys
import unittest
__lowerCamelCase : List[str] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowerCamelCase : Union[str, Any] = os.path.join(git_repo_path, '''src''', '''transformers''')
__lowerCamelCase : int = '''
{0} = None
'''
__lowerCamelCase : List[Any] = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
__lowerCamelCase : Union[str, Any] = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class __snake_case ( unittest.TestCase ):
    def test_find_backend( self : List[str] ):
        """simple docstring"""
        no_backend = find_backend("""    _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""" )
        self.assertIsNone(no_backend )
        simple_backend = find_backend("""    if not is_tokenizers_available():""" )
        self.assertEqual(simple_backend , """tokenizers""" )
        backend_with_underscore = find_backend("""    if not is_tensorflow_text_available():""" )
        self.assertEqual(backend_with_underscore , """tensorflow_text""" )
        double_backend = find_backend("""    if not (is_sentencepiece_available() and is_tokenizers_available()):""" )
        self.assertEqual(double_backend , """sentencepiece_and_tokenizers""" )
        double_backend_with_underscore = find_backend(
            """    if not (is_sentencepiece_available() and is_tensorflow_text_available()):""" )
        self.assertEqual(double_backend_with_underscore , """sentencepiece_and_tensorflow_text""" )
        triple_backend = find_backend(
            """    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""" )
        self.assertEqual(triple_backend , """sentencepiece_and_tokenizers_and_vision""" )
    def test_read_init( self : int ):
        """simple docstring"""
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("""torch""" , objects )
        self.assertIn("""tensorflow_text""" , objects )
        self.assertIn("""sentencepiece_and_tokenizers""" , objects )
        # Likewise, we can't assert on the exact content of a key
        self.assertIn("""BertModel""" , objects["""torch"""] )
        self.assertIn("""TFBertModel""" , objects["""tf"""] )
        self.assertIn("""FlaxBertModel""" , objects["""flax"""] )
        self.assertIn("""BertModel""" , objects["""torch"""] )
        self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""] )
        self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""] )
    def test_create_dummy_object( self : int ):
        """simple docstring"""
        dummy_constant = create_dummy_object("""CONSTANT""" , """'torch'""" )
        self.assertEqual(dummy_constant , """\nCONSTANT = None\n""" )
        dummy_function = create_dummy_object("""function""" , """'torch'""" )
        self.assertEqual(
            dummy_function , """\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n""" )
        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("""FakeClass""" , """'torch'""" )
        self.assertEqual(dummy_class , expected_dummy_class )
    def test_create_dummy_files( self : str ):
        """simple docstring"""
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
        self.assertEqual(dummy_files["""torch"""] , expected_dummy_pytorch_file )
| 379
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : str = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__( self , vocab_size=12_81_12 , max_position_embeddings=10_24 , encoder_layers=12 , encoder_ffn_dim=40_96 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=40_96 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=10_24 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=1_28 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.0_01 , router_aux_loss_coef=0.0_01 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
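# Sketch of the router_dtype guard above (illustrative only):
#
#   NllbMoeConfig(router_dtype="float16")   # ok
#   NllbMoeConfig(router_dtype="int8")      # raises ValueError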
| 379
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
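# With the lazy structure above, the torch-backed symbols are only materialized
# on first attribute access, so importing the package stays cheap. Illustrative
# only (the exact import path depends on where this package is installed):
#   from transformers import XmodConfig   # no torch import triggered
#   from transformers import XmodModel    # resolves via the lazy module, imports torch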
def max_product_subarray(numbers) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products ending at index i
        number = numbers[i]
        if number < 0:
            # a negative factor swaps the roles of the running max and min
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
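

# Hedged usage check for max_product_subarray. The inputs are classic examples
# for the maximum-product-subarray problem, not taken from the original file:
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0
    assert max_product_subarray([-2, -3, 4]) == 24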
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        # Kept from the upstream test template; relies on an external tester object.
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a random PIL image for the tests below."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "labels"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_char_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.char_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        decode_strs = [seq.replace(" ", "") for seq in decoded_tok]
        self.assertListEqual(decode_strs, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = None
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
    def test_processor_batch_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        char_input = torch.randn(1, 27, 38)
        bpe_input = torch.randn(1, 27, 50_257)
        wp_input = torch.randn(1, 27, 30_522)
        results = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"])
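

# The tests above mirror the upstream processor test layout; an illustrative
# way to run just this class (the path is an assumption about the repo layout):
#   pytest tests/models/mgp_str/test_processor_mgp_str.py -q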
"""simple docstring"""
_A : List[Any] = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def __magic_name__ ( __snake_case : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(__snake_case , __snake_case ):
lowercase : Dict = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(__snake_case )
lowercase : int = "".join(bin(__snake_case )[2:].zfill(8 ) for byte in data )
lowercase : Dict = len(__snake_case ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase : List[str] = B"=" * ((6 - len(__snake_case ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__snake_case ) % 6)
else:
lowercase : Optional[int] = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__snake_case ) , 6 ) ).encode()
+ padding
)
def __magic_name__ ( __snake_case : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__snake_case , __snake_case ) and not isinstance(__snake_case , __snake_case ):
lowercase : Any = (
"argument should be a bytes-like object or ASCII string, "
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(__snake_case )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__snake_case , __snake_case ):
try:
lowercase : Optional[Any] = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase : Optional[Any] = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__snake_case ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase : Union[str, Any] = encoded_data[:-padding]
lowercase : Tuple = "".join(
bin(B64_CHARSET.index(__snake_case ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase : Any = "".join(
bin(B64_CHARSET.index(__snake_case ) )[2:].zfill(6 ) for char in encoded_data )
lowercase : str = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__snake_case ) , 8 )
]
return bytes(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
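    # Hedged spot-check on an RFC 4648 test vector ("foobar" <-> "Zm9vYmFy"),
    # using the base64_encode/base64_decode names defined above:
    assert base64_encode(b"foobar") == b"Zm9vYmFy"
    assert base64_decode("Zm9vYmFy") == b"foobar"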
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    model_type = "wavlm"
    def __init__(
        self,
        vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu",
        hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1,
        feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1,
        initializer_range=0.02, layer_norm_eps=1e-5,
        feat_extract_norm="group", feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16,
        num_buckets=320, max_bucket_distance=800,
        do_stable_layer_norm=False, apply_spec_augment=True,
        mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2,
        mask_feature_prob=0.0, mask_feature_length=10,
        num_codevectors_per_group=320, num_codevector_groups=2,
        contrastive_logits_temperature=0.1, num_negatives=100,
        codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1,
        ctc_loss_reduction="mean", ctc_zero_infinity=False,
        use_weighted_layer_sum=False, classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512, num_ctc_classes=80,
        pad_token_id=0, bos_token_id=1, eos_token_id=2,
        add_adapter=False, adapter_kernel_size=3, adapter_stride=2,
        num_adapter_layers=3, output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the convolutional feature extractor, i.e. input samples per logit frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
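

# Hedged usage sketch: with the default conv strides (5, 2, 2, 2, 2, 2, 2) the
# property above multiplies out to 5 * 2**6 == 320 input samples per frame:
#   config = WavLMConfig()
#   assert config.inputs_to_logits_ratio == 320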
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        embedding_size=768, head_ratio=2, conv_kernel_size=9, num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
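

# Hedged usage sketch for the ONNX config above (the export flow itself is an
# assumption, not shown in this file):
#   onnx_config = ConvBertOnnxConfig(ConvBertConfig(), task="sequence-classification")
#   list(onnx_config.inputs.keys())  # ["input_ids", "attention_mask", "token_type_ids"]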
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
a :Optional[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/nllb-large-en-ro": 1_024,
"facebook/nllb-200-distilled-600M": 1_024,
}
# fmt: off
a :Tuple = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
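

# Hedged usage sketch (network access and the checkpoint are assumptions, not
# part of this file):
#   tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   batch = tok("Hello world", return_tensors="pt")  # ids are prefixed with the eng_Latn code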
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n',
            '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low',
            'er', '\u0120lowest', '\u0120newer', '\u0120wider', '[UNK]',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        self.special_tokens_map = {'unk_token': '[UNK]'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello', 'World')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/deberta-base')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            'sequence builders', add_special_tokens=True, add_prefix_space=False)
        encoded_pair_from_decode = tokenizer.encode(
            'sequence builders', 'multi-sequence build', add_special_tokens=True, add_prefix_space=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained('microsoft/deberta-base')
            sequences = [
                'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
                'ALBERT incorporates two parameter reduction techniques',
                'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
                ' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
                ' vocabulary embedding.',
            ]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding['input_ids']]
            # fmt: off
            expected_encoding = {
'input_ids': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = 'blenderbot-small'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=50_265, max_position_embeddings=512,
        encoder_layers=8, encoder_ffn_dim=2_048, encoder_attention_heads=16,
        decoder_layers=8, decoder_ffn_dim=2_048, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        use_cache=True, is_encoder_decoder=True,
        activation_function='gelu', d_model=512,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=1, scale_embedding=False,
        pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
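
# Hedged usage sketch: the attribute_map above lets `hidden_size` proxy `d_model`:
#   config = BlenderbotSmallConfig()
#   assert config.hidden_size == config.d_model == 512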
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ['default', 'seq2seq-lm']:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction='inputs')
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'past_key_values.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ])
        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ['default', 'seq2seq-lm']:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'present.{i}.value'] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework)
        decoder_inputs = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, encoder_seq_length = common_inputs['input_ids'].shape
            decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['decoder_attention_mask'] = torch.cat(
                [common_inputs['decoder_attention_mask'], torch.ones(batch, decoder_past_length)], dim=1)
            common_inputs['past_key_values'] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
            for _ in range(min_num_layers):
                common_inputs['past_key_values'].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ))
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs['past_key_values'].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework)
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
            else:
                import torch
            batch, seqlen = common_inputs['input_ids'].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['attention_mask'].dtype
            common_inputs['attention_mask'] = torch.cat(
                [common_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
            common_inputs['past_key_values'] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        if self.task in ['default', 'seq2seq-lm']:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ['default', 'seq2seq-lm']:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t)
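

# Hedged usage sketch for dummy-input generation (the tokenizer checkpoint is
# an illustrative assumption, not part of this module):
#   onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)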
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self, model, tokenizer, processor):
        classifier = ZeroShotClassificationPipeline(
            model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"]
        )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self, classifier, _):
        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics")
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # No kwarg
        outputs = classifier("Who are you voting for in 2020?", ["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"])
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health")
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"])
        self.assertEqual(
            outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
        )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0)

        outputs = classifier(
            "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}"
        )
        self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]})

        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(1)
            ],
        )
        outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]}
                for i in range(2)
            ],
        )

        with self.assertRaises(ValueError):
            classifier("", candidate_labels="politics")
        with self.assertRaises(TypeError):
            classifier(None, candidate_labels="politics")
        with self.assertRaises(ValueError):
            classifier("Who are you voting for in 2020?", candidate_labels="")
        with self.assertRaises(TypeError):
            classifier("Who are you voting for in 2020?", candidate_labels=None)
        with self.assertRaises(ValueError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template="Not formatting template",
            )
        with self.assertRaises(AttributeError):
            classifier(
                "Who are you voting for in 2020?",
                candidate_labels="politics",
                hypothesis_template=None,
            )

        self.run_entailment_id(classifier)
    def run_entailment_id(self, zero_shot_classifier: Pipeline):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id

        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, -1)

        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id, 0)

        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id, 2)

        zero_shot_classifier.model.config.label2id = original_label2id
        self.assertEqual(original_entailment, zero_shot_classifier.entailment_id)
    @require_torch
    def test_truncation(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] )
    @require_torch
    def test_small_model_pt(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
    @require_tf
    def test_small_model_tf(self):
        zero_shot_classifier = pipeline(
            "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf"
        )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
    @slow
    @require_torch
    def test_large_model_pt(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="pt")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"]
        )
self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=lowerCamelCase_ , )
        self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs), {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            }, )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=lowerCamelCase_ , )
        self.assertEqual(
            nested_simplify(outputs), {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
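# Hedged usage sketch (not one of the tests above): the minimal way to run the
# zero-shot pipeline these tests exercise. The tiny checkpoint is only useful for
# smoke-testing, so its scores are meaningless; this assumes `pipeline` is already
# imported at the top of this test module, as the tests themselves require.
def _example_zero_shot():
    classifier = pipeline(
        "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt")
    result = classifier(
        "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
    print(result["labels"], result["scores"])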
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output


@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output


@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output


@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)


def get_default_vocab_list():
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K",
        "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O",
        ".", "-", "<null_1>", "<mask>",
    )
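# Hedged usage sketch: constructing a folding-model config and round-tripping it
# through to_dict(). The specific values are illustrative assumptions; guarded so
# it only runs when this module is executed directly.
if __name__ == "__main__":
    config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
    print(len(config.vocab_list), "tokens in the default ESM-2 vocabulary")
    print(config.to_dict()["esmfold_config"]["trunk"]["num_blocks"])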
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        # conv1_get: [kernel_size, kernel_number, conv_step]
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # read saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301
        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_weight = model_dic.get("rate_weight")
        rate_thre = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_weight, rate_thre)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training
        print("----------------------Start Training-------------------------")
        print(("  - - Shape: Train_Data  ", np.shape(datas_train)))
        print(("  - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1, )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0]))
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print(("   - - Training epoch: ", rp, f"     - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print(("  - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1, )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
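    # Hedged usage sketch: a forward pass over random data. The shapes are
    # illustrative assumptions (two 5x5 kernels, step 1, 2x2 pooling over a 20x20
    # input, so the flattened pooled size matches num_bp1 = 2 * 8 * 8 = 128);
    # training on real images needs matching teach vectors.
    cnn = CNN([5, 2, 1], 2, 128, 32, 10)
    image = np.random.rand(20, 20)
    conved, pooled = cnn.convolution(image)
    print("feature maps:", len(conved), "pooled maps:", len(pooled))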
'''simple docstring'''
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a list of tokens; snippets below MIN_NUM_TOKENS hash to None."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
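
# Hedged usage sketch (not part of the pipeline): MinHash of a toy snippet. The
# snippet text is an illustrative assumption; it has more than MIN_NUM_TOKENS
# tokens, so it hashes rather than returning None.
def _example_min_hash():
    code = "def add(first, second):\n    return first + second  # tiny helper for the dedup demo"
    tokens = [t for t in NON_ALPHA.split(code) if len(t.strip()) > 0]
    min_hash = get_min_hash(tokens)
    print("hashed" if min_hash is not None else "too short")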
def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet into a set of alphanumeric tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100, ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list), total=len(cluster_list), ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
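
# Hedged usage sketch: deduplicating a toy in-memory dataset. The field names
# match what the functions above expect ("content", "repo_name", "path"); the
# snippets themselves are illustrative assumptions. Guarded because the pipeline
# spawns multiprocessing pools.
if __name__ == "__main__":
    snippet = "def add(first, second):\n    return first + second  # shared helper used twice"
    toy = Dataset.from_dict(
        {
            "content": [snippet, snippet, "print('hello world, nothing like the others')"],
            "repo_name": ["repo_a", "repo_b", "repo_c"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    ds_filter, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)
    print(len(ds_filter), "rows kept;", len(clusters), "duplicate clusters")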
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}", ):
            scheduler.set_timesteps(timesteps=timesteps)
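
# Hedged usage sketch (not a test): the bare sampling loop the tests above
# exercise, end to end. The zero-returning `denoiser` is a stand-in assumption
# for a trained consistency model; call _example_sampling_loop() manually to try it.
def _example_sampling_loop():
    scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
    scheduler.set_timesteps(timesteps=[106, 0])
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8, generator=generator) * scheduler.init_noise_sigma

    def denoiser(x, t):
        # placeholder for a trained model(x, t) call
        return torch.zeros_like(x)

    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        sample = scheduler.step(denoiser(scaled, t), t, sample, generator=generator).prev_sample
    print("final sample stats:", sample.mean().item(), sample.std().item())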
'''simple docstring'''
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    # take items in order of decreasing key until the weight budget is exhausted
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
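    # Hedged usage sketch: a small menu run through the greedy heuristic. The
    # item names, values, and weights are illustrative assumptions.
    food = ["Burger", "Pizza", "Coca Cola", "Rice"]
    value = [80, 100, 60, 70]
    weight = [40, 60, 40, 70]
    foods = build_menu(food, value, weight)
    chosen, total = greedy(foods, 60, Things.get_value)
    print("Chosen foods:", chosen)
    print("Total value:", total)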