code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
_UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class a ( a_ ):
def __init__( self , _lowerCamelCase=None , **_lowerCamelCase ):
warnings.warn(
'`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '
'instead.' , _lowerCamelCase , )
super().__init__(args=_lowerCamelCase , **_lowerCamelCase )
| 220 |
"""simple docstring"""
import argparse
import json
from tqdm import tqdm
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--src_path' , type=__snake_case , default='biencoder-nq-dev.json' , help='Path to raw DPR training data' , )
parser.add_argument(
'--evaluation_set' , type=__snake_case , help='where to store parsed evaluation_set file' , )
parser.add_argument(
'--gold_data_path' , type=__snake_case , help='where to store parsed gold_data_path file' , )
lowercase = parser.parse_args()
with open(args.src_path , 'r' ) as src_file, open(args.evaluation_set , 'w' ) as eval_file, open(
args.gold_data_path , 'w' ) as gold_file:
lowercase = json.load(__snake_case )
for dpr_record in tqdm(__snake_case ):
lowercase = dpr_record['question']
lowercase = [context['title'] for context in dpr_record['positive_ctxs']]
eval_file.write(question + '\n' )
gold_file.write('\t'.join(__snake_case ) + '\n' )
if __name__ == "__main__":
main()
| 220 | 1 |
"""simple docstring"""
def a__ ( snake_case__ ) -> List[Any]:
lowerCamelCase , lowerCamelCase = [], []
while len(snake_case__ ) > 1:
lowerCamelCase , lowerCamelCase = min(snake_case__ ), max(snake_case__ )
start.append(snake_case__ )
end.append(snake_case__ )
collection.remove(snake_case__ )
collection.remove(snake_case__ )
end.reverse()
return start + collection + end
if __name__ == "__main__":
lowerCAmelCase : Union[str, Any] = input("""Enter numbers separated by a comma:\n""").strip()
lowerCAmelCase : Union[str, Any] = [int(item) for item in user_input.split(""",""")]
print(*merge_sort(unsorted), sep=""",""")
| 168 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class __magic_name__ :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
"""simple docstring"""
lowerCamelCase = parent
lowerCamelCase = batch_size
lowerCamelCase = seq_length
lowerCamelCase = is_training
lowerCamelCase = use_input_mask
lowerCamelCase = use_token_type_ids
lowerCamelCase = use_labels
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_act
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = type_sequence_label_size
lowerCamelCase = initializer_range
lowerCamelCase = num_labels
lowerCamelCase = num_choices
lowerCamelCase = scope
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase = None
if self.use_input_mask:
lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase = None
if self.use_token_type_ids:
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if self.use_labels:
lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , use_stable_embedding=_a , )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
"""simple docstring"""
lowerCamelCase = OpenLlamaModel(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a )
lowerCamelCase = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = True
lowerCamelCase = OpenLlamaModel(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , )
lowerCamelCase = model(_a , attention_mask=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = OpenLlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
"""simple docstring"""
lowerCamelCase = True
lowerCamelCase = True
lowerCamelCase = OpenLlamaForCausalLM(config=_a )
model.to(_a )
model.eval()
# first forward pass
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , use_cache=_a , )
lowerCamelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , output_hidden_states=_a , )["""hidden_states"""][0]
lowerCamelCase = model(
_a , attention_mask=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , past_key_values=_a , output_hidden_states=_a , )["""hidden_states"""][0]
# select random slice
lowerCamelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1e-3 ) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) = config_and_inputs
lowerCamelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__UpperCamelCase = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__UpperCamelCase = (
{
"feature-extraction": OpenLlamaModel,
"text-classification": OpenLlamaForSequenceClassification,
"text-generation": OpenLlamaForCausalLM,
"zero-shot": OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = OpenLlamaModelTester(self )
lowerCamelCase = ConfigTester(self , config_class=_a , hidden_size=37 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase = type
self.model_tester.create_and_check_model(*_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = """single_label_classification"""
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = 3
lowerCamelCase = """multi_label_classification"""
lowerCamelCase = input_dict["""input_ids"""]
lowerCamelCase = input_ids.ne(1 ).to(_a )
lowerCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase = OpenLlamaForSequenceClassification(_a )
model.to(_a )
model.eval()
lowerCamelCase = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCamelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase = OpenLlamaModel(_a )
original_model.to(_a )
original_model.eval()
lowerCamelCase = original_model(_a ).last_hidden_state
lowerCamelCase = original_model(_a ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase = {"""type""": scaling_type, """factor""": 10.0}
lowerCamelCase = OpenLlamaModel(_a )
scaled_model.to(_a )
scaled_model.eval()
lowerCamelCase = scaled_model(_a ).last_hidden_state
lowerCamelCase = scaled_model(_a ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_a , _a , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_a , _a , atol=1e-5 ) )
| 168 | 1 |
from collections.abc import Sequence
def __snake_case ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ) -> float:
return sum(c * (x**i) for i, c in enumerate(_lowerCAmelCase ) )
def __snake_case ( _lowerCAmelCase : Sequence[float] , _lowerCAmelCase : float ) -> float:
A_ : Tuple = 0.0
for coeff in reversed(_lowerCAmelCase ):
A_ : Tuple = result * x + coeff
return result
if __name__ == "__main__":
_lowerCAmelCase : Dict = (0.0, 0.0, 5.0, 9.3, 7.0)
_lowerCAmelCase : Optional[int] = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 300 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
class __magic_name__ ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self :Union[str, Any] , *snake_case :Tuple , **snake_case :Any ):
'''simple docstring'''
warnings.warn(
"The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use YolosImageProcessor instead." , snake_case , )
super().__init__(*snake_case , **snake_case )
| 300 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase__ =get_tests_dir('fixtures/test_sentencepiece.model')
UpperCamelCase__ ={'target_lang': 'fi', 'source_lang': 'en'}
UpperCamelCase__ ='>>zh<<'
UpperCamelCase__ ='Helsinki-NLP/'
if is_torch_available():
UpperCamelCase__ ='pt'
elif is_tf_available():
UpperCamelCase__ ='tf'
else:
UpperCamelCase__ ='jax'
@require_sentencepiece
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = MarianTokenizer
__snake_case = False
__snake_case = True
def UpperCamelCase_ ( self ) -> Tuple:
super().setUp()
_SCREAMING_SNAKE_CASE : Dict = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
_SCREAMING_SNAKE_CASE : List[str] = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
_SCREAMING_SNAKE_CASE : Dict = Path(self.tmpdirname )
save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["vocab"] )
save_json(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["tokenizer_config_file"] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["source_spm"] )
copyfile(__lowerCamelCase , save_dir / VOCAB_FILES_NAMES["target_spm"] )
_SCREAMING_SNAKE_CASE : Optional[Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self , **__lowerCamelCase ) -> MarianTokenizer:
return MarianTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Union[str, Any]:
return (
"This is a test",
"This is a test",
)
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : int = "</s>"
_SCREAMING_SNAKE_CASE : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCamelCase ) , __lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCamelCase ) , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__lowerCamelCase ) , 9 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Tuple = MarianTokenizer.from_pretrained(F"""{ORG_NAME}opus-mt-en-de""" )
_SCREAMING_SNAKE_CASE : Tuple = en_de_tokenizer(["I am a small frog"] , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = [3_8, 1_2_1, 1_4, 6_9_7, 3_8_8_4_8, 0]
self.assertListEqual(__lowerCamelCase , batch.input_ids[0] )
_SCREAMING_SNAKE_CASE : List[Any] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = [x.name for x in Path(__lowerCamelCase ).glob("*" )]
self.assertIn("source.spm" , __lowerCamelCase )
MarianTokenizer.from_pretrained(__lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : List[str] = tok(
["I am a small frog" * 1_0_0_0, "I am a small frog"] , padding=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 5_1_2) )
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
_SCREAMING_SNAKE_CASE : Tuple = tok(["I am a tiny frog", "I am a small frog"] , padding=__lowerCamelCase , return_tensors=__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
self.assertEqual(batch_smaller.input_ids.shape , (2, 1_0) )
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
# fmt: off
_SCREAMING_SNAKE_CASE : str = {"input_ids": [[4_3_4_9_5, 4_6_2, 2_0, 4_2_1_6_4, 1_3_6_9, 5_2, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 7_4_9_1, 3_8_9_9_9, 6, 8, 4_6_4, 1_3_2, 1_7_0_3, 4_9_2, 1_3, 4_6_6_9, 3_7_8_6_7, 1_3, 7_5_2_5, 2_7, 1_5_9_3, 9_8_8, 1_3, 3_3_9_7_2, 7_0_2_9, 6, 2_0, 8_2_5_1, 3_8_3, 2, 2_7_0, 5_8_6_6, 3_7_8_8, 2, 2_3_5_3, 8_2_5_1, 1_2_3_3_8, 2, 1_3_9_5_8, 3_8_7, 2, 3_6_2_9, 6_9_5_3, 1_8_8, 2_9_0_0, 2, 1_3_9_5_8, 8_0_1_1, 1_1_5_0_1, 2_3, 8_4_6_0, 4_0_7_3, 3_4_0_0_9, 2_0, 4_3_5, 1_1_4_3_9, 2_7, 8, 8_4_6_0, 4_0_7_3, 6_0_0_4, 2_0, 9_9_8_8, 3_7_5, 2_7, 3_3, 2_6_6, 1_9_4_5, 1_0_7_6, 1_3_5_0, 3_7_8_6_7, 3_2_8_8, 5, 5_7_7, 1_0_7_6, 4_3_7_4, 8, 5_0_8_2, 5, 2_6_4_5_3, 2_5_7, 5_5_6, 4_0_3, 2, 2_4_2, 1_3_2, 3_8_3, 3_1_6, 4_9_2, 8, 1_0_7_6_7, 6, 3_1_6, 3_0_4, 4_2_3_9, 3, 0], [1_4_8, 1_5_7_2_2, 1_9, 1_8_3_9, 1_2, 1_3_5_0, 1_3, 2_2_3_2_7, 5_0_8_2, 5_4_1_8, 4_7_5_6_7, 3_5_9_3_8, 5_9, 3_1_8, 1_9_5_5_2, 1_0_8, 2_1_8_3, 5_4, 1_4_9_7_6, 4_8_3_5, 3_2, 5_4_7, 1_1_1_4, 8, 3_1_5, 2_4_1_7, 5, 9_2, 1_9_0_8_8, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0], [3_6, 6_3_9_5, 1_2_5_7_0, 3_9_1_4_7, 1_1_5_9_7, 6, 2_6_6, 4, 4_5_4_0_5, 7_2_9_6, 3, 0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 
5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0, 5_8_1_0_0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="Helsinki-NLP/opus-mt-en-de" , revision="1a8c2263da11e68e50938f97e10cd57820bd504c" , decode_kwargs={"use_source_tokenizer": True} , )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : str = MarianTokenizer.from_pretrained("hf-internal-testing/test-marian-two-vocabs" )
_SCREAMING_SNAKE_CASE : int = "Tämä on testi"
_SCREAMING_SNAKE_CASE : Any = "This is a test"
_SCREAMING_SNAKE_CASE : Dict = [7_6, 7, 2_0_4_7, 2]
_SCREAMING_SNAKE_CASE : Optional[Any] = [6_9, 1_2, 1_1, 9_4_0, 2]
_SCREAMING_SNAKE_CASE : Tuple = tokenizer(__lowerCamelCase ).input_ids
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = tokenizer(text_target=__lowerCamelCase ).input_ids
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = tokenizer.decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase )
self.assertEqual(__lowerCamelCase , __lowerCamelCase ) | 325 |
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ (__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : typing.Counter[int] = Counter()
for base in range(1, max_perimeter + 1 ):
for perpendicular in range(__lowerCamelCase, max_perimeter + 1 ):
_SCREAMING_SNAKE_CASE : List[Any] = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ (__lowerCamelCase = 1000 ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = pythagorean_triple(__lowerCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions") | 325 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = 42
class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
@register_to_config
def __init__( self, lowerCAmelCase__ = 6_5536, lowerCAmelCase__ = None, lowerCAmelCase__ = 2, lowerCAmelCase__ = 2, lowerCAmelCase__ = 0, lowerCAmelCase__ = "fourier", lowerCAmelCase__ = True, lowerCAmelCase__ = False, lowerCAmelCase__ = 0.0, lowerCAmelCase__ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), lowerCAmelCase__ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), lowerCAmelCase__ = "UNetMidBlock1D", lowerCAmelCase__ = None, lowerCAmelCase__ = (32, 32, 64), lowerCAmelCase__ = None, lowerCAmelCase__ = 8, lowerCAmelCase__ = 1, lowerCAmelCase__ = False, ) -> Union[str, Any]:
super().__init__()
snake_case_ = sample_size
# time
if time_embedding_type == "fourier":
snake_case_ = GaussianFourierProjection(
embedding_size=8, set_W_to_weight=lowerCAmelCase__, log=lowerCAmelCase__, flip_sin_to_cos=lowerCAmelCase__)
snake_case_ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
snake_case_ = Timesteps(
block_out_channels[0], flip_sin_to_cos=lowerCAmelCase__, downscale_freq_shift=lowerCAmelCase__)
snake_case_ = block_out_channels[0]
if use_timestep_embedding:
snake_case_ = block_out_channels[0] * 4
snake_case_ = TimestepEmbedding(
in_channels=lowerCAmelCase__, time_embed_dim=lowerCAmelCase__, act_fn=lowerCAmelCase__, out_dim=block_out_channels[0], )
snake_case_ = nn.ModuleList([])
snake_case_ = None
snake_case_ = nn.ModuleList([])
snake_case_ = None
# down
snake_case_ = in_channels
for i, down_block_type in enumerate(lowerCAmelCase__):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
snake_case_ = i == len(lowerCAmelCase__) - 1
snake_case_ = get_down_block(
lowerCAmelCase__, num_layers=lowerCAmelCase__, in_channels=lowerCAmelCase__, out_channels=lowerCAmelCase__, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
self.down_blocks.append(lowerCAmelCase__)
# mid
snake_case_ = get_mid_block(
lowerCAmelCase__, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=lowerCAmelCase__, add_downsample=lowerCAmelCase__, )
# up
snake_case_ = list(reversed(lowerCAmelCase__))
snake_case_ = reversed_block_out_channels[0]
if out_block_type is None:
snake_case_ = out_channels
else:
snake_case_ = block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase__):
snake_case_ = output_channel
snake_case_ = (
reversed_block_out_channels[i + 1] if i < len(lowerCAmelCase__) - 1 else final_upsample_channels
)
snake_case_ = i == len(lowerCAmelCase__) - 1
snake_case_ = get_up_block(
lowerCAmelCase__, num_layers=lowerCAmelCase__, in_channels=lowerCAmelCase__, out_channels=lowerCAmelCase__, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
self.up_blocks.append(lowerCAmelCase__)
snake_case_ = output_channel
# out
snake_case_ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
snake_case_ = get_out_block(
out_block_type=lowerCAmelCase__, num_groups_out=lowerCAmelCase__, embed_dim=block_out_channels[0], out_channels=lowerCAmelCase__, act_fn=lowerCAmelCase__, fc_dim=block_out_channels[-1] // 4, )
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = True, ) -> Union[UNetaDOutput, Tuple]:
snake_case_ = timestep
if not torch.is_tensor(lowerCAmelCase__):
snake_case_ = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
elif torch.is_tensor(lowerCAmelCase__) and len(timesteps.shape) == 0:
snake_case_ = timesteps[None].to(sample.device)
snake_case_ = self.time_proj(lowerCAmelCase__)
if self.config.use_timestep_embedding:
snake_case_ = self.time_mlp(lowerCAmelCase__)
else:
snake_case_ = timestep_embed[..., None]
snake_case_ = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
snake_case_ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
# 2. down
snake_case_ = ()
for downsample_block in self.down_blocks:
snake_case_ , snake_case_ = downsample_block(hidden_states=lowerCAmelCase__, temb=lowerCAmelCase__)
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
snake_case_ = self.mid_block(lowerCAmelCase__, lowerCAmelCase__)
# 4. up
for i, upsample_block in enumerate(self.up_blocks):
snake_case_ = down_block_res_samples[-1:]
snake_case_ = down_block_res_samples[:-1]
snake_case_ = upsample_block(lowerCAmelCase__, res_hidden_states_tuple=lowerCAmelCase__, temb=lowerCAmelCase__)
# 5. post-process
if self.out_block:
snake_case_ = self.out_block(lowerCAmelCase__, lowerCAmelCase__)
if not return_dict:
return (sample,)
return UNetaDOutput(sample=lowerCAmelCase__)
| 69 | """simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = '''T5Config'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "mt5"
SCREAMING_SNAKE_CASE_ = MTaConfig
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "mt5"
SCREAMING_SNAKE_CASE_ = MTaConfig
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "mt5"
SCREAMING_SNAKE_CASE_ = MTaConfig
| 69 | 1 |
import warnings
warnings.warn(
'''memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: '''
'''`from accelerate import find_executable_batch_size` to avoid this warning.''',
FutureWarning,
)
| 173 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Learning-rate schedule with a polynomial warmup phase.

    For steps < `warmup_steps` the rate ramps from 0 up to
    `initial_learning_rate` as `initial_learning_rate * (step / warmup_steps) ** power`;
    afterwards it delegates to `decay_schedule_fn`, shifted so the decay
    schedule starts counting from step 0.

    Fixes over the mangled original: duplicate parameter names (a SyntaxError),
    constructor arguments bound to throwaway locals instead of attributes,
    nonexistent `tf.floataa` dtype, and the serialization hook renamed back to
    `get_config` (required by the Keras LearningRateSchedule contract). The
    class name `WarmUp` is taken from its own references elsewhere in the file.
    """

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        # Keys must mirror the constructor arguments so Keras can round-trip
        # the schedule through `from_config`.
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def __lowercase(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    """Create an optimizer and the learning-rate schedule it uses.

    NOTE(review): the function name is machine-mangled (upstream this is
    presumably `create_optimizer`); it is kept to avoid breaking callers.

    The schedule is a polynomial decay from `init_lr` down to
    `init_lr * min_lr_ratio` over `num_train_steps - num_warmup_steps` steps,
    optionally preceded by a `WarmUp` ramp. When `weight_decay_rate > 0` an
    `AdamWeightDecay` optimizer is built (excluding LayerNorm and bias
    parameters from decay); otherwise a plain Keras Adam is used.

    Fixes over the mangled original: duplicate parameter names and duplicate
    `beta_a=` keyword arguments (both SyntaxErrors), and locals bound so the
    final `return optimizer, lr_schedule` actually refers to defined names.

    Returns:
        (optimizer, lr_schedule) so the LR evolution can be tracked
        independently of the optimizer.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=['''LayerNorm''', '''layer_norm''', '''bias'''],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied directly to the variables.

    Unlike L2 regularization, the decay here is not folded into the gradient:
    before each Adam update, `var -= learning_rate * var * weight_decay_rate`
    for every variable whose name matches `include_in_weight_decay` (or matches
    nothing in `exclude_from_weight_decay` when the include list is unset).

    Fixes over the mangled original: duplicate parameter names and annotated
    tuple-unpacking targets (both SyntaxErrors), the undefined base class
    `A__` restored to `Adam` (imported at the top of this file), constructor
    arguments bound to attributes, and the optimizer-override hooks renamed to
    the names the Keras optimizer API actually calls
    (`_prepare_local`, `_resource_apply_dense`, `get_config`, ...).
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {'''WarmUp''': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        # Cache the decay rate as a constant tensor alongside Adam's own
        # per-device coefficients.
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name='''adam_weight_decay_rate'''
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        # Apply decoupled weight decay, or a no-op for excluded variables.
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['''weight_decay_rate'''],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        # Decay must complete before Adam's own update touches the variable.
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({'''weight_decay_rate''': self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to the variable named `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients across multiple mini-batches.

    Call the instance with a list of gradients to add them to the running
    sums; read `.gradients` for the accumulated values, `.step` for the number
    of accumulation calls, and `.reset()` to zero everything.

    Fixes over the mangled original: the undefined name `SCREAMING_SNAKE_CASE__`
    used inside `step`, `__call__` and the reset loop (replaced by
    `trainable=False` / `tf.zeros_like(gradient)`), state bound to throwaway
    locals instead of `self._gradients` / `self._accum_steps`, the nonexistent
    `tf.intaa` dtype, the undefined base class `A__` dropped, and the duplicate
    mangled method name split back into the `gradients` property and `reset()`.
    """

    def __init__(self):
        """Start with no gradient slots; the step counter is created lazily."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (creates the counter on first access)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients (None entries are passed through)."""
        if not self._gradients:
            raise ValueError('''The accumulator should be called first to initialize the gradients''')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulate `gradients`, creating zero-initialized slots on first call."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"""Expected {len(self._gradients )} gradients, but got {len(gradients )}""")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Zero the accumulated gradients and the step counter."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 173 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
    [
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue_model_parallelism.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
        {
            '''framework''': '''pytorch''',
            '''script''': '''run_glue.py''',
            '''model_name_or_path''': '''roberta-large''',
            '''instance_type''': '''ml.p3dn.24xlarge''',
            '''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
        },
    ] )
class __lowercase ( unittest.TestCase ):
    """SageMaker integration test: fine-tunes RoBERTa-large on GLUE with
    smdistributed model parallelism on `ml.p3dn.24xlarge` and asserts
    runtime / accuracy / loss KPIs against the parameterized `results`.

    NOTE(review): all five methods below are machine-mangled to the same name
    `_lowerCamelCase`, so only the last definition survives on the class.
    Upstream they are presumably `setUp`, `create_estimator`,
    `save_results_as_csv`, and the parameterized `test_*` method (the test body
    itself calls `self.create_estimator(...)`). Restore distinct names before
    running.
    """

    def _lowerCamelCase ( self ):
        # setUp: copy the example GLUE training script into the test workspace
        # provided by the `sm_env` fixture.
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_UpperCAmelCase , )
        # NOTE(review): `check=_UpperCAmelCase` references an undefined mangled
        # name — presumably `check=True` upstream.
        assert hasattr(self , '''env''' )

    def _lowerCamelCase ( self , _UpperCAmelCase ):
        # Build a HuggingFace SageMaker estimator for a model-parallel run.
        # configuration for running training on smdistributed Model Parallel
        __a : Dict = {
            '''enabled''': True,
            '''processes_per_host''': 8,
        }
        __a : List[Any] = {
            '''enabled''': True,
            '''parameters''': {
                '''microbatches''': 4,
                '''placement_strategy''': '''spread''',
                '''pipeline''': '''interleaved''',
                '''optimize''': '''speed''',
                '''partitions''': 4,
                '''ddp''': True,
            },
        }
        # NOTE(review): the two dicts above were presumably bound to
        # `mpi_options` and `smp_options` upstream — the next line still reads
        # those names, which are undefined as written.
        __a : Optional[int] = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
        __a : List[str] = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
        # creates estimator
        # NOTE(review): `instance_count=_UpperCAmelCase`, `debugger_hook_config=_UpperCAmelCase`
        # and `distribution=_UpperCAmelCase` reference an undefined mangled name;
        # upstream these are presumably the method argument, `False`, and the
        # distribution dict built above — confirm before running.
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=_UpperCAmelCase , instance_type=self.instance_type , debugger_hook_config=_UpperCAmelCase , hyperparameters={
                **self.env.hyperparameters,
                '''model_name_or_path''': self.model_name_or_path,
                '''max_steps''': 500,
            } , metric_definitions=self.env.metric_definitions , distribution=_UpperCAmelCase , py_version='''py36''' , )

    def _lowerCamelCase ( self , _UpperCAmelCase ):
        # Export the CloudWatch metrics of a finished training job to CSV.
        # NOTE(review): the f-string reads `job_name`, presumably the original
        # parameter name of the mangled `_UpperCAmelCase` argument.
        TrainingJobAnalytics(_UpperCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(1,)] )
    def _lowerCamelCase ( self , _UpperCAmelCase ):
        # Parameterized end-to-end test: train, collect metrics, assert KPIs.
        # NOTE(review): the assertions below read `train_runtime`, `eval_accuracy`,
        # `eval_loss`, `estimator` and `result_metrics_df` — the original names of
        # the locals mangled to `__a` in this body.
        # create estimator
        __a : str = self.create_estimator(_UpperCAmelCase )
        # run training
        estimator.fit()
        # result dataframe
        __a : List[str] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        __a : List[str] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        __a : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __a : int = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _UpperCAmelCase )
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build `GLPNImageProcessor` instances in
    the tests below.

    Fixes over the mangled original: duplicate constructor parameter names
    (a SyntaxError) and arguments bound to throwaway locals instead of
    instance attributes (so `prepare_image_processor_dict` could never read
    them). The class name is taken from its instantiation site later in the
    file (`GLPNImageProcessingTester(self)`).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct a `GLPNImageProcessor`."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for `GLPNImageProcessor` across PIL, NumPy and PyTorch inputs.

    Fixes over the mangled original: all seven methods shared one mangled name
    (so only the last survived on the class), the undefined base class
    `_UpperCamelCase` is restored to `ImageProcessingSavingTestMixin` (imported
    at the top of this file), the class attribute is renamed back to
    `image_processing_class` (the name the methods read), and the undefined
    `_UpperCAmelCase` keyword values are replaced with the flags the calls need
    (`equal_resolution=False`, `numpify=True`, `torchify=True`).
    """

    # Processor class under test; None when vision dependencies are missing.
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, '''do_resize'''))
        self.assertTrue(hasattr(image_processing, '''size_divisor'''))
        self.assertTrue(hasattr(image_processing, '''resample'''))
        self.assertTrue(hasattr(image_processing, '''do_rescale'''))

    def test_batch_feature(self):
        # Intentionally empty: GLPN's processor has no batching behavior to check.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        # Output height/width must be padded to a multiple of size_divisor.
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
# NOTE(review): mangled names — upstream these are lazy "already warned" flags
# for the optional audio backends; confirm the original identifiers.
_a , _a , _a = False, False, False


@dataclass
class Audio:
    """Audio feature: stored as `{"bytes": binary, "path": string}` and decoded
    on access to `{"path", "array", "sampling_rate"}` via soundfile/librosa.

    Fixes over the mangled original: all dataclass fields were collapsed to the
    duplicate name `lowercase__` (so the attributes the methods read —
    `sampling_rate`, `mono`, `decode`, `pa_type` — never existed),
    `decode_example` had duplicate parameter names (a SyntaxError), and
    `field(..., init=lowerCamelCase, repr=lowerCamelCase)` referenced an
    undefined name. The class name `Audio` is taken from its own
    self-reference in `cast_storage`.
    """

    # Target sampling rate to resample to on decode; None keeps the file's rate.
    sampling_rate: Optional[int] = None
    # Downmix multi-channel audio to mono on decode.
    mono: bool = True
    # Whether examples are decoded to arrays or returned as raw storage dicts.
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
    _type: str = field(default='''Audio''', init=False, repr=False)

    def __call__(self):
        """Return the pyarrow storage type of this feature."""
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode one example (path str, raw wav bytes, array dict, or storage
        dict) into the `{"bytes", "path"}` storage form."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''') from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value['''array'''], value['''sampling_rate'''], format='''wav''')
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''') is not None and os.path.isfile(value['''path''']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm'''):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''') is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''')
                if value.get('''bytes'''):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value['''bytes'''], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value['''path'''], dtype='''h''', mode='''r''').astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value['''sampling_rate'''], format='''wav''')
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''')}
        elif value.get('''bytes''') is not None or value.get('''path''') is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes'''), "path": value.get('''path''')}
        else:
            raise ValueError(
                F'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''')

    def decode_example(self, value, token_per_repo_id=None) -> dict:
        """Decode a storage dict into `{"path", "array", "sampling_rate"}`,
        optionally resampling/downmixing per the feature's settings.

        `token_per_repo_id` maps Hub repo ids to auth tokens for streaming
        reads of private datasets.
        """
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''')
        path, file = (value['''path'''], BytesIO(value['''bytes'''])) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(F'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''')
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''') from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''')
        if file is None:
            # Streaming case: resolve a per-repo auth token from the URL, if any.
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split('''::''')[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['''repo_id''']
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, '''rb''', use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        # soundfile returns (frames, channels); librosa expects channels first.
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self):
        """If in the decodable state, raise; otherwise flatten into plain
        `bytes`/`path` Value features."""
        from .features import Value

        if self.decode:
            raise ValueError('''Cannot flatten a decoded Audio feature.''')
        return {
            "bytes": Value('''binary'''),
            "path": Value('''string'''),
        }

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast an arrow array (string paths, binary blobs, or structs) to this
        feature's `{"bytes", "path"}` storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['''bytes''', '''path'''], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('''array'''):
            # Structs carrying decoded arrays are re-encoded to wav bytes.
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('''bytes''') >= 0:
                bytes_array = storage.field('''bytes''')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('''path''') >= 0:
                path_array = storage.field('''path''')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage) -> pa.StructArray:
        """Read every referenced file into the `bytes` column and reduce paths
        to basenames, producing a self-contained storage array."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, '''rb''') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path''']) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('''path''').to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['''bytes''', '''path'''], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 100 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """Builds small ALBERT configs/inputs and checks each model head's output
    shapes via `parent` (the unittest.TestCase driving the checks).

    Fixes over the mangled original: duplicate constructor parameter names
    (a SyntaxError), arguments bound to throwaway locals instead of `self`
    attributes, undefined device arguments restored to `torch_device`, and the
    duplicate mangled method names restored to the names the test class below
    actually calls (`prepare_config_and_inputs`, `create_and_check_model`, ...).
    The class name is taken from its instantiation site (`AlbertModelTester(self)`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create a config plus random ids/masks/labels sized per the tester."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three call signatures (full, no mask, ids only).
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each sequence across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test harness wired up for the ALBERT model family.

    Fixes over the mangled original: the duplicate base class
    (`lowerCamelCase, lowerCamelCase` — a TypeError at class creation) is
    restored to `ModelTesterMixin, PipelineTesterMixin` (both imported at the
    top of this file), `_prepare_for_class` had duplicate parameter names
    (a SyntaxError), the undefined config argument to `ConfigTester` is
    restored to `AlbertConfig`, and the mangled class attributes / method
    names are restored to the names the mixins and unittest expect.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original mangled attribute was a bare `= True`;
    # upstream this flag is presumably `fx_compatible` — confirm.
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                # Pretraining additionally needs MLM labels and the SOP label.
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class _UpperCAmelCase(unittest.TestCase):
    """Slow integration test checking real `albert-base-v2` hidden-state outputs."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('albert-base-v2')
        input_ids = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 7_68))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from the original implementation.
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 100 | 1 |
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of ``nums`` (in DFS order) whose elements sum to ``max_sum``.

    Restored from obfuscated code that assigned all locals to one name and
    called ``create_state_space_tree`` before any function of that name existed.
    """
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first backtracking over the subset state-space tree.

    A branch is pruned when the current path already exceeds ``max_sum`` or when
    even taking every remaining number could no longer reach it.
    """
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 59 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# MT5 reuses the T5 tokenizers: alias them (or their dummy placeholders when the
# optional backend is missing) under MT5-specific names for `extra_objects` below.
# NOTE(review): the obfuscated source bound every value here to one throwaway
# name, leaving `_import_structure`, `MTaTokenizer` and `MTaTokenizerFast`
# undefined at the bottom of the file — restored.
if is_sentencepiece_available():
    from ..ta.tokenization_ta import TaTokenizer
else:
    from ...utils.dummy_sentencepiece_objects import TaTokenizer

MTaTokenizer = TaTokenizer

if is_tokenizers_available():
    from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import TaTokenizerFast

MTaTokenizerFast = TaTokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]

if TYPE_CHECKING:
    from .configuration_mta import MTaConfig, MTaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mta import (
            MTaEncoderModel,
            MTaForConditionalGeneration,
            MTaForQuestionAnswering,
            MTaModel,
            MTaPreTrainedModel,
            MTaStack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["""__file__"""],
        _import_structure,
        extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
        module_spec=__spec__,
    )
| 59 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
def __lowerCAmelCase ( lowercase : list ) -> int:
"""simple docstring"""
if not postfix_notation:
return 0
snake_case : Union[str, Any] = {"+", "-", "*", "/"}
snake_case : list[Any] = []
for token in postfix_notation:
if token in operations:
snake_case ,snake_case : Dict = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(lowercase ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    """Derive the class label from a pet image path like ``.../beagle_32.jpg`` -> ``beagle``."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(R"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    """Dataset over image file names with an optional transform and label-to-id mapping."""

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    """Fine-tune a frozen-backbone ``resnet50d`` classifier on the pets data with accelerate.

    Restored from obfuscated code: the file defined three functions under one
    name and collapsed every local variable to one identifier, leaving dozens
    of undefined references (``lr``, ``parser``, ``training_function`` ...).
    """
    # Initialize accelerator (with experiment trackers when requested).
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.'
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [
        os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")
    ]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    resume_step = None

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f'Resumed from checkpoint: {args.resume_from_checkpoint}')
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f'step_{overall_step}'
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}: {100 * eval_metric:.2f}')
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                }, step=overall_step, )
        if checkpointing_steps == "epoch":
            output_dir = f'epoch_{epoch}'
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()


def main():
    """Parse command-line arguments and launch training."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs` and relevent project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 112 | 1 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the number starts with a recognized issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if ``credit_card_number`` passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Run all checks (digits only, length 13-16, issuer prefix, Luhn) and print the verdict.

    Restored from obfuscated code: all three functions shared one name, so the
    calls to `validate_initial_digits` and `luhn_validation` were unresolved.
    """
    error_message = f'''{credit_card_number} is an invalid credit card number because'''
    if not credit_card_number.isdigit():
        print(f'''{error_message} it has nonnumerical characters.''')
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f'''{error_message} of its length.''')
        return False

    if not validate_initial_digits(credit_card_number):
        print(f'''{error_message} of its first two digits.''')
        return False

    if not luhn_validation(credit_card_number):
        print(f'''{error_message} it fails the Luhn check.''')
        return False

    print(f'''{credit_card_number} is a valid credit card number.''')
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("""4111111111111111""")
    validate_credit_card_number("""32323""")
| 95 |
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    """Common model tests for `PriorTransformer`.

    Restored from obfuscated code: the base class name was undefined (NameError
    at import), every method shared one name so only the last survived, and the
    class name collided with the integration-test class below, shadowing it.
    """

    model_class = PriorTransformer
    main_input_name = '''hidden_states'''

    @property
    def dummy_input(self):
        """A minimal batch of randomly-initialized inputs for a forward pass."""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """Like `dummy_input`, but built from a seeded RNG so outputs are reproducible."""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'num_attention_heads': 2,
            'attention_head_dim': 4,
            'num_layers': 2,
            'embedding_dim': 8,
            'num_embeddings': 7,
            'additional_embeddings': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            'hf-internal-testing/prior-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ['hidden_states', 'timestep']
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained('hf-internal-testing/prior-dummy')
        model = model.to(torch_device)

        if hasattr(model, 'set_default_attn_processor'):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    """Slow tests against the real Kandinsky 2.1 prior checkpoint.

    Restored from obfuscated code: the class and both methods had names that
    collided with the common test class above, and the seeded-input helper's
    arguments were replaced by a single undefined identifier.
    """

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=7_6_8, num_embeddings=7_7, seed=0):
        """Seeded random inputs at the real model's dimensions."""
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [1_3, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
            [3_7, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
            # fmt: on
        ])
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained('kandinsky-community/kandinsky-2-1-prior', subfolder='prior')
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 7_6_8]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1E-3)
| 295 | 0 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
__UpperCAmelCase : int = logging.get_logger(__name__)
class UpperCAmelCase_(FeatureExtractionMixin):
    '''
    Feature extractor that turns raw HTML strings into lists of text nodes and
    the XPath of each node, using BeautifulSoup (bs4).

    Restored from obfuscated code: the base class name was undefined and the
    three helper methods all shared one name while `__call__` invoked them as
    `xpath_soup`, `get_three_from_single` and `construct_xpath`.
    '''

    def __init__(self, **kwargs):
        """bs4 is an optional dependency; fail early with a clear message if absent."""
        requires_backends(self, ['''bs4'''])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        """Return (tag names, 1-based sibling indices) along the path from root to ``element``."""
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            # NOTE(review): only direct children are relevant for an XPath step,
            # hence the non-recursive search — confirm against the reference.
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        """Parse one HTML string into (doc strings, per-string tag paths, per-string subscripts)."""
        html_code = BeautifulSoup(html_string, '''html.parser''')

        all_doc_strings = []
        stringaxtag_seq = []
        stringaxsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bsa.element.NavigableString:
                if type(element.parent) != bsa.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                stringaxtag_seq.append(xpath_tags)
                stringaxsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(stringaxtag_seq):
            raise ValueError('''Number of doc strings and xtags does not correspond''')
        if len(all_doc_strings) != len(stringaxsubs_seq):
            raise ValueError('''Number of doc strings and xsubs does not correspond''')

        return all_doc_strings, stringaxtag_seq, stringaxsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        """Join tag/subscript pairs into an XPath string like ``/html/body/div[2]``."""
        xpath = ''''''
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"""/{tagname}"""
            if subs != 0:
                xpath += f"""[{subs}]"""
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        """Extract nodes and xpaths from one HTML string or a batch of them."""
        valid_strings = False

        # Check that strings has a valid type
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                '''HTML strings must of type `str`, `List[str]` (batch of examples), '''
                f"""but is of type {type(html_strings)}.""")

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, stringaxtag_seq, stringaxsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)
            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, stringaxtag_seq, stringaxsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {'''nodes''': nodes, '''xpaths''': xpaths}
        # No tensor conversion: nodes/xpaths are ragged string lists.
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
| 315 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import machinery for the MGP-STR model. NOTE(review): restored from
# obfuscated code that bound both the import structure and the _LazyModule to
# one throwaway name, leaving `_import_structure` undefined at the bottom.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 315 | 1 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first ``n`` natural numbers (Project Euler problem 6).

    Restored from obfuscated code whose body referenced an undefined ``n`` and
    whose ``__main__`` guard called an undefined ``solution``.
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(F'{solution() = }')
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast, CPU-sized tests for StableDiffusionControlNetImg2ImgPipeline with one ControlNet.

    The original class used undefined base-class names and assigned every class
    attribute / method to the same mangled identifier, so only the last one
    survived.  The mixin-expected attribute names (pipeline_class, params,
    batch_params, image_params, image_latents_params) and per-test method names
    are restored here.  NOTE(review): base-class order follows the upstream
    diffusers test module — confirm against the imported mixins.
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a tiny, randomly initialized set of pipeline components."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline call kwargs for the given device/seed."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        # np.uinta was not a real numpy attribute — np.uint8 is the intended dtype.
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class MultiControlNetImg2ImgPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    """Fast tests for StableDiffusionControlNetImg2ImgPipeline driven by a MultiControlNetModel.

    Restores the mixin-expected class attributes and unique method names that the
    original mangling collapsed, and fixes two concrete bugs in
    ``get_dummy_components``: the ``init_weights`` closure referenced an undefined
    variable ``m`` instead of its parameter, and ``torch.nn.Convad`` /
    ``torch.nn.init.normal`` are not valid torch attributes (``Conv2d`` /
    ``normal_`` are).
    """

    pipeline_class = StableDiffusionControlNetImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        """Build tiny components with two ControlNets wrapped in a MultiControlNetModel."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            # Re-randomize conv weights so the two controlnets differ.
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        controlnet = MultiControlNetModel([controlnet1, controlnet2])
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs; control_image is a list (one per controlnet)."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        """Different control_guidance start/end windows must change the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    """GPU integration test against real pretrained ControlNet/SD checkpoints.

    The original class had both methods named ``_lowercase`` (so the first was
    shadowed and ``tearDown`` was never actually overridden) and passed the
    undefined name ``_a`` for several keyword arguments; ``None`` is restored
    where the call sites clearly expect it (safety_checker, progress bar).
    """

    def tearDown(self):
        super().tearDown()
        # Free GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )
        assert np.abs(expected_image - image).max() < 9e-2
| 208 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Module-level setup: the rest of this script reads `logger` and `device`,
# which the mangled original assigned to an unrelated name.
logger = logging.getLogger(__name__)

# Embeddings are only computed, never trained, so gradients are globally off.
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of ``n`` tokens, where tokens are delimited by ``character``.

    The original signature named every parameter ``lowercase__`` while the body
    referenced ``text``/``n``/``character``; the names are restored to match the
    body, and the function is renamed to ``split_text`` because that is the name
    the rest of this script calls.
    """
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
def split_documents(documents: dict) -> dict:
    """Split each document of the batch into 100-word passages.

    ``documents`` is a datasets batch dict with parallel "title" and "text"
    lists; the result keeps the two columns aligned, one row per passage.
    The mangled original passed the wrong names to ``split_text`` and
    ``texts.append`` — each passage, not the whole batch, is appended.
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute DPR embeddings for a batch of passages.

    Returns a dict with an "embeddings" column (numpy float array, one row per
    passage).  Parameter names are restored from the body/call-site usage;
    ``truncation=True`` / ``return_dict=True`` replace the undefined name the
    mangled original passed.
    """
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    # `device` is the module-level cuda/cpu selector.
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
    """Build a DPR-embedded knowledge dataset from a csv file, then a Faiss HNSW index over it.

    Parameter names are restored from the attribute accesses in the body
    (``rag_example_args.csv_path`` etc.); the ``dataset.map`` calls are wired to
    the ``split_documents`` / ``embed`` helpers defined above, which the mangled
    original referenced through an undefined name.
    """
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    """CLI arguments for the RAG knowledge-dataset example.

    The original declared three classes all named ``__a`` with every field
    named ``__snake_case``; the class and field names are restored from their
    uses in ``main`` and in the ``HfArgumentParser`` call at the bottom of the
    file.  NOTE(review): ``default=None`` for ``question`` is inferred from the
    help text ("Default is 'What does Moses' rod turn into ?'.") — confirm.
    """

    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    """Dataset-processing options; field names restored from ``main``'s
    ``processing_args.num_proc`` / ``processing_args.batch_size`` accesses."""

    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    """Faiss HNSW index options; field names restored from ``main``'s
    ``index_hnsw_args.d`` / ``index_hnsw_args.m`` accesses."""

    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    # The mangled original assigned the parser and the three parsed dataclass
    # instances to one reused name, leaving `main`'s arguments undefined.
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a temporary output directory when none was given.
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
| 28 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
# `logging` here is transformers' logging utility (imported above).
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    """Copy the original HiFi-GAN generator weights into the HF SpeechT5HifiGan model.

    The mangled original read every checkpoint tensor into a throwaway local,
    so nothing was ever written into ``hf_model``; the assignments below restore
    the intended copies.  Parameter order follows the call site, which passes
    the checkpoint dict first.  NOTE(review): the ``conv_pre``/``upsampler``/
    ``resblocks``/``conv_post`` attribute names come from the SpeechT5HifiGan
    implementation — confirm against the installed transformers version.
    """
    # weight_g / weight_v only exist while weight norm is applied.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    """Convert an original HiFi-GAN vocoder checkpoint to the HF SpeechT5HifiGan format.

    Loads the original checkpoint and feature-normalization stats, copies the
    generator weights, stores the mean/scale tensors on the model, saves it, and
    optionally pushes to the hub.  Parameter names are restored from the
    argparse call site; the mangled original also dropped ``model.mean`` /
    ``model.scale`` into throwaway locals.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy holds the per-feature mean (row 0) and scale (row 1).
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # The mangled original assigned the parser and parsed args to an unrelated
    # name, so `parser.add_argument` / `args.*` referenced undefined names.
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
    parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    args = parser.parse_args()

    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 28 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

# Map of pretrained model ids to their remote config files (transformers
# convention; the original constant names were mangled away).
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextVaConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXtV2 model.

    The mangled original inherited from the undefined name ``__snake_case`` and
    assigned every constructor argument to a throwaway local instead of
    ``self``, so the config stored nothing; the ``self.*`` assignments and the
    base classes (both imported at the top of this file) are restored.
    """

    # PretrainedConfig uses this key for (de)serialization dispatch.
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        # Resolve which stages a backbone consumer should expose.
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 52 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    """In CONNECTION_TIMES_OUT mode, requests should hang (simulated) or time out.

    The original raised on the undefined name ``_lowerCAmelCase``; the imported
    ``RequestWouldHangIndefinitelyError`` is the exception this simulation mode
    raises.  The function is also given a unique ``test_``-prefixed name so
    pytest collects it (three functions shared the name ``A_`` before).
    """
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error():
    """In CONNECTION_FAILS mode, any request should raise ConnectionError.

    Renamed from the colliding ``A_`` so pytest collects it as its own test.
    """
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled():
    """With HF_DATASETS_OFFLINE=1, http_head must refuse to hit the network.

    The original raised on the undefined name ``_lowerCAmelCase``.
    NOTE(review): ``ConnectionError`` (builtin) is the exception datasets'
    offline mode raises from ``http_head`` — confirm against the installed
    datasets version.
    """
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 52 | 1 |
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    # The mangled original assigned every object to the same name `A_` while
    # the rest of the script referenced `classifier`, `train_datagen`, etc.;
    # the names are restored from those uses.  `layers.ConvaD` /
    # `layers.MaxPoolingaD` are not real Keras attributes — Conv2D /
    # MaxPooling2D are the intended layers.
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )
    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions
    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    # Add the batch dimension expected by predict().
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
| 368 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Helper that builds tiny DeiT configs/inputs and asserts output shapes.

    The mangled original stored nothing on ``self`` in ``__init__`` and gave
    every method the same name (so all but the last were shadowed); names are
    restored from the sibling test class, which calls ``TFDeiTModelTester``,
    ``prepare_config_and_inputs``, ``create_and_check_model`` etc.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): is_decoder=False restored for the mangled argument —
        # DeiT is encoder-only, so the decoder flag is off.
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common tests for the TF DeiT models.

    NOTE(review): in the original every method was named `a__`, so each `def`
    silently shadowed the previous one — `setUp` never ran and unittest
    discovered no tests — and all four boolean flags were bound to a single
    obfuscated class attribute. Methods/attributes are restored to the
    conventional names. The two mixin bases and `DeiTConfig` are assumed to be
    imported at the top of the file — confirm the import names.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Drop `labels` for heads (e.g. the distilled "WithTeacher" model) whose
        # call signature does not accept them.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the slow integration test."""
    # NOTE(review): renamed from the obfuscated `lowerCamelCase_` — the
    # integration test below already calls it as `prepare_img()`.
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    """
    Slow integration test running the distilled DeiT checkpoint on a real image.

    NOTE(review): originally both members were named `a__`, so the test method
    overwrote the `default_image_processor` cached property (making
    `self.default_image_processor` fail), and the class itself shadowed the
    model-test class of the same obfuscated name. Both are renamed.
    """

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 316 | 0 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    """
    Minimal LightningModule wrapper whose only job is to give the checkpoint's
    `state_dict` the right key layout (`model.*`, `qa_outputs.*`) for loading.

    NOTE(review): the original bound `model`, `num_labels` and the QA head to
    *locals* instead of attributes, so `self.model.config...` on the next line
    and the `lightning_model.model` / `lightning_model.qa_outputs` accesses in
    the conversion function raised AttributeError. The class is renamed to
    `LightningModel` because the conversion function already instantiates it
    under that name.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because LightningModule expects it; never called here
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
) -> None:
    """
    Convert a PyTorch-Lightning Longformer-QA checkpoint into a Hugging Face
    `LongformerForQuestionAnswering` folder.

    NOTE(review): the original signature repeated the same parameter name three
    times (a SyntaxError), assigned every intermediate to the same local while
    the following lines read `lightning_model` / `longformer_for_qa`, and was
    named differently from its call site in the ``__main__`` block. All restored.

    Args:
        longformer_model: model identifier, e.g. ``longformer-base-4096``.
        longformer_question_answering_ckpt_path: path to the Lightning ``.ckpt``.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
    """
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""")
if __name__ == "__main__":
    # NOTE(review): the parser and the parsed namespace were both bound to the
    # same obfuscated name while the following lines read `parser` and `args`
    # (NameError). Restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
| 150 | """simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# NOTE(review): the logger and all three tokenizer constant tables were bound
# to one repeatedly-overwritten name, while the tokenizer class below reads
# `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`. Restored to those names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from every utf-8 byte (0-255) to a printable unicode
    character. Printable bytes map to themselves; the remainder are shifted to
    code points >= 256 so the BPE vocabulary never contains whitespace/control
    characters.

    NOTE(review): the original read `bs`/`cs`/`b` that were never bound (the
    assignment targets had been renamed away) and the function name did not
    match the `bytes_to_unicode()` call inside the tokenizer. Restored.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in `word` (a tuple of symbol
    strings), as consumed by the BPE merge loop.

    NOTE(review): the original assigned to throwaway names while the loop read
    `pairs`/`prev_char`, and the tokenizer calls this as `get_pairs`. Restored.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCAmelCase_(PreTrainedTokenizer):
    """
    Byte-level BPE tokenizer for LED (same algorithm as BART/GPT-2).

    NOTE(review): the original block was obfuscation-damaged — undefined base
    class name, duplicate `__init__` parameter names (a SyntaxError), missing
    `self.` on every attribute assignment, and every method named
    `snake_case` so each `def` shadowed the previous one. Everything is
    restored to the conventional `PreTrainedTokenizer` interface; the public
    class name is left unchanged for external callers.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Run the byte-pair-encoding merge loop on one pre-token (memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked (earliest-learned) pair first
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab; unknown -> unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of BPE tokens back into a single decoded string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write `vocab.json` and `merges.txt` into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """LED (like BART/RoBERTa) does not use token type ids: always zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE-encoded like a mid-sentence word."""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then pad `global_attention_mask` with -1 to the same length."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
| 150 | 1 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# NOTE(review): the original bound the import structure to `A_`, then
# overwrote `A_` with the modeling list inside the `else:` branch, and finally
# passed an undefined `_import_structure` to `_LazyModule` without installing
# the proxy into `sys.modules` (NameError on import). Restored to the
# conventional transformers lazy-module pattern.
_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 132 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowercase( __a , unittest.TestCase ):
'''simple docstring'''
lowercase__ = WavaVecaPhonemeCTCTokenizer
lowercase__ = False
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
super().setUp()
_snake_case : List[Any] = (
"""<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː """
"""ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː """
"""ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 """
"""oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ """
"""pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ """
"""yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ """
"""əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ """
"""ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ """
"""ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ """
"""uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ """
"""ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ """
"""ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ """
"""ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"""
).split(""" """ )
_snake_case : List[str] = dict(zip(a_, range(len(a_ ) ) ) )
_snake_case : Union[str, Any] = {"""pad_token""": """<pad>""", """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>"""}
_snake_case : Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file, """w""", encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
def UpperCamelCase_ ( self: Tuple, a_: str, a_: List[str]=False, a_: str=20, a_: Optional[int]=5 ):
'''simple docstring'''
_snake_case : Union[str, Any] = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=a_ )) for i in range(len(a_ ) )]
_snake_case : List[Any] = list(filter(lambda a_ : [t[0]] == tokenizer.encode(t[1], do_phonemize=a_ ), a_ ) )
if max_length is not None and len(a_ ) > max_length:
_snake_case : Dict = toks[:max_length]
if min_length is not None and len(a_ ) < min_length and len(a_ ) > 0:
while len(a_ ) < min_length:
_snake_case : List[Any] = toks + toks
# toks_str = [t[1] for t in toks]
_snake_case : str = [t[0] for t in toks]
# Ensure consistency
_snake_case : Optional[Any] = tokenizer.decode(a_, clean_up_tokenization_spaces=a_ )
if " " not in output_txt and len(a_ ) > 1:
_snake_case : Optional[Any] = (
tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=a_ )
+ """ """
+ tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=a_ )
)
if with_prefix_space:
_snake_case : str = """ """ + output_txt
_snake_case : Tuple = tokenizer.encode(a_, add_special_tokens=a_ )
return output_txt, output_ids
def UpperCamelCase_ ( self: int, **a_: List[str] ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **a_ )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : str = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
# check adding a single token
tokenizer.add_tokens("""xxx""" )
_snake_case : Optional[Any] = tokenizer("""m xxx ɪ""", do_phonemize=a_ ).input_ids
self.assertEqual(a_, [13, 392, 17] ) # xxx should be last token
tokenizer.add_tokens(["""aaa""", """bbb""", """ccc"""] )
_snake_case : Union[str, Any] = tokenizer("""m aaa ɪ ccc""", do_phonemize=a_ ).input_ids
self.assertEqual(a_, [13, 393, 17, 395] ) # aaa and ccc should be after xxx and 2 after aaa
_snake_case : Any = tokenizer("""maɪ c""", do_phonemize=a_ ).input_ids
self.assertEqual(a_, [3, 200] ) # mai should be <unk> (=3)
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : int = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_snake_case : Tuple = """Hello how are you"""
_snake_case : Optional[int] = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
self.assertEqual(a_, """h ə l oʊ h aʊ ɑːɹ j uː""" )
def UpperCamelCase_ ( self: Union[str, Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_snake_case : int = """Hello how are you"""
_snake_case : int = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a_ ).input_ids, tokenizer(a_, do_phonemize=a_ ).input_ids )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_snake_case : Tuple = """Hello how are you"""
_snake_case : List[str] = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
_snake_case : Optional[Any] = tokenizer.decode(tokenizer(a_ ).input_ids )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_snake_case : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
[24, 22, 5, 24, 22, 5, 77],
]
_snake_case : List[Any] = tokenizer.decode(sample_ids[0] )
_snake_case : str = tokenizer.batch_decode(a_ )
self.assertEqual(a_, batch_tokens[0] )
self.assertEqual(a_, ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : List[str] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_snake_case : Union[str, Any] = """Hello how are you"""
_snake_case : Tuple = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
self.assertEqual(a_, """h ə l oʊ | h aʊ | ɑːɹ | j uː |""" )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_snake_case : int = """Hello how are you"""
_snake_case : Dict = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
self.assertEqual(tokenizer(a_ ).input_ids, tokenizer(a_, do_phonemize=a_ ).input_ids )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
_snake_case : List[str] = [
[11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
[tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
]
# fmt: on
# decode with word_del_token filter
_snake_case : List[str] = tokenizer.decode(sample_ids[0] )
_snake_case : Optional[int] = tokenizer.batch_decode(a_ )
self.assertEqual(a_, batch_tokens[0] )
self.assertEqual(a_, ["""k s ɾ ɾ l ɭʲ""", """j ð s j ð s oːɹ"""] )
# decode with no word_del_token filter
_snake_case : int = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=a_ )
_snake_case : Dict = tokenizer.batch_decode(a_, filter_word_delimiter_token=a_ )
self.assertEqual(a_, batch_tokens[0] )
self.assertEqual(a_, ["""k s ɾ | ɾ l | ɭʲ""", """| j ð | s j ð s oːɹ"""] )
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
_snake_case : Dict = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_snake_case : str = """Hello how are you"""
_snake_case : List[Any] = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
_snake_case : Union[str, Any] = tokenizer.decode(tokenizer(a_ ).input_ids, filter_word_delimiter_token=a_ )
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : str = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
_snake_case : Any = """Hello how are you"""
_snake_case : Any = tokenizer.phonemize(a_, phonemizer_lang="""en-us""" )
_snake_case : Any = tokenizer.decode(tokenizer(a_ ).input_ids, filter_word_delimiter_token=a_ )
self.assertEqual(""" """.join([p.strip() for p in phonemes.split(""" |""" )] ).strip(), a_ )
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained(
"""facebook/wav2vec2-lv-60-espeak-cv-ft""", word_delimiter_token=a_ )
_snake_case : str = """Hello how are you"""
_snake_case : str = tokenizer(a_, phonemizer_lang="""en-us""" ).input_ids
_snake_case : List[str] = tokenizer(a_, phonemizer_lang="""fr-fr""" ).input_ids
self.assertNotEqual(a_, a_ )
_snake_case : Dict = tokenizer.decode(a_ )
_snake_case : Union[str, Any] = tokenizer.decode(a_ )
self.assertEqual(a_, """h ə l oʊ h aʊ ɑːɹ j uː""" )
self.assertEqual(a_, """ɛ l o h aʊ a ʁ j u""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
_snake_case : Optional[Any] = """Hello how Are you"""
_snake_case : Optional[int] = """hello how are you"""
_snake_case : List[str] = tokenizer(a_ ).input_ids
_snake_case : int = tokenizer(a_ ).input_ids
self.assertEqual(a_, a_ )
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Tuple = self.tokenizer_class.from_pretrained("""facebook/wav2vec2-lv-60-espeak-cv-ft""" )
tokenizer.add_tokens(["""!""", """?"""] )
tokenizer.add_special_tokens({"""cls_token""": """$$$"""} )
# fmt: off
_snake_case : List[Any] = [
[11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
[24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
]
# fmt: on
_snake_case : List[Any] = tokenizer.batch_decode(a_ )
self.assertEqual(a_, ["""k s ɾ ɾ l ɭʲ!?!? $$$""", """j ð s j ð s oːɹ $$$"""] )
@staticmethod
def UpperCamelCase_ ( a_: Tuple, a_: Tuple ):
'''simple docstring'''
_snake_case : List[str] = [d[key] for d in offsets]
return retrieved_list
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer(word_delimiter_token="""|""" )
tokenizer.add_tokens("""|""" )
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
_snake_case : Optional[int] = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
# fmt: on
_snake_case : Any = tokenizer.decode(a_, output_char_offsets=a_, filter_word_delimiter_token=a_ )
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys() ), 2 )
self.assertTrue("""text""" in outputs )
self.assertTrue("""char_offsets""" in outputs )
self.assertTrue(isinstance(a_, a_ ) )
# check that order of chars is correct and identical for both outputs
self.assertEqual(""" """.join(self.get_from_offsets(outputs["""char_offsets"""], """char""" ) ), outputs.text )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""], """char""" ), ["""k""", """s""", """ɾ""", """ɾ""", """|""", """ɾ""", """l""", """|""", """ɭʲ"""] )
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""], """start_offset""" ), [0, 1, 4, 7, 9, 11, 12, 15, 16] )
self.assertListEqual(
self.get_from_offsets(outputs["""char_offsets"""], """end_offset""" ), [1, 4, 6, 9, 10, 12, 15, 16, 17] )
def UpperCamelCase_ ( self: Optional[Any] ):
'''simple docstring'''
_snake_case : Optional[Any] = self.get_tokenizer(word_delimiter_token="""|""" )
def check_list_tuples_equal(a_: List[str], a_: Optional[Any] ):
self.assertTrue(isinstance(a_, a_ ) )
self.assertTrue(isinstance(outputs_list[0], a_ ) )
# transform list to ModelOutput
_snake_case : int = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
self.assertListEqual(outputs_batch["""text"""], outputs_batch_a["""text"""] )
def recursive_check(a_: Union[str, Any], a_: Optional[Any] ):
if isinstance(a_, a_ ):
[recursive_check(a_, a_ ) for la, la in zip(a_, a_ )]
self.assertEqual(a_, a_ )
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch["""char_offsets"""], outputs_batch_a["""char_offsets"""] )
# fmt: off
_snake_case : int = [
[11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
[24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
_snake_case : int = tokenizer.batch_decode(a_, output_char_offsets=a_ )
_snake_case : Union[str, Any] = [tokenizer.decode(a_, output_char_offsets=a_ ) for ids in sample_ids]
check_list_tuples_equal(a_, a_ )
@unittest.skip("""Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes""" )
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
pass
    @unittest.skip("""Wav2Vec2PhonemeTokenizer always puts spaces between phonemes""" )
    def UpperCamelCase_ ( self: List[Any] ):
        """Intentionally skipped: phoneme spacing makes this base test inapplicable."""
        pass
    @unittest.skip("""encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency""" )
    def UpperCamelCase_ ( self: Optional[int] ):
        """Intentionally skipped: encode/decode are not mutual inverses here."""
        pass
    @unittest.skip("""Wav2Vec2PhonemeModel has no max model length => no testing""" )
    def UpperCamelCase_ ( self: int ):
        """Intentionally skipped: there is no max model length to exercise."""
        pass
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : int = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : Optional[Any] = tokenizer.vocab_size
_snake_case : int = len(a_ )
self.assertNotEqual(a_, 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_snake_case : Optional[int] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_snake_case : Any = tokenizer.add_tokens(a_ )
_snake_case : Tuple = tokenizer.vocab_size
_snake_case : List[str] = len(a_ )
self.assertNotEqual(a_, 0 )
self.assertEqual(a_, a_ )
self.assertEqual(a_, len(a_ ) )
self.assertEqual(a_, all_size + len(a_ ) )
_snake_case : Optional[Any] = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""", add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ), 4 )
self.assertGreater(tokens[0], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1 )
_snake_case : Optional[int] = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_snake_case : Dict = tokenizer.add_special_tokens(a_ )
_snake_case : List[str] = tokenizer.vocab_size
_snake_case : List[Any] = len(a_ )
self.assertNotEqual(a_, 0 )
self.assertEqual(a_, a_ )
self.assertEqual(a_, len(a_ ) )
self.assertEqual(a_, all_size_a + len(a_ ) )
_snake_case : Dict = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""", add_special_tokens=a_ )
self.assertGreaterEqual(len(a_ ), 6 )
self.assertGreater(tokens[0], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0], tokens[1] )
self.assertGreater(tokens[-3], tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3], tokens[-4] )
self.assertEqual(tokens[0], tokenizer.eos_token_id )
self.assertEqual(tokens[-3], tokenizer.pad_token_id )
    @unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
    def UpperCamelCase_ ( self: int ):
        """Intentionally skipped: encoding is out of scope for this tokenizer."""
        pass
    @unittest.skip("""The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.""" )
    def UpperCamelCase_ ( self: Optional[Any] ):
        """Intentionally skipped: encoding is out of scope for this tokenizer."""
        pass
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : str = self.get_tokenizers(fast=a_, do_lower_case=a_ )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
_snake_case : str = ["""ð""", """ɪ""", """s""", """ɪ""", """z""", """ɐ""", """t""", """ɛ""", """k""", """s""", """t"""]
_snake_case : Optional[Any] = tokenizer.convert_tokens_to_string(a_ )
self.assertIsInstance(output["""text"""], a_ )
| 132 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( PretrainedConfig ):
    """Configuration for a timm-backed backbone wrapper.

    NOTE(review): the obfuscated ``__init__`` reused a single parameter name for
    every argument (a SyntaxError) and never stored anything on ``self``; the
    canonical field names below are restored from the config's visible usage.
    """

    # model identifier used by the auto-config machinery
    SCREAMING_SNAKE_CASE__ = '''timm_backbone'''

    def __init__( self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs ):
        super().__init__(**kwargs )
        self.backbone = backbone                                # timm model name
        self.num_channels = num_channels                        # input channels
        self.features_only = features_only                      # expose feature maps only
        self.use_pretrained_backbone = use_pretrained_backbone  # load timm weights
        self.use_timm_backbone = True                           # always timm for this config
        # default to the last feature stage when no explicit indices are given
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 323 |
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class UpperCamelCase__ ( tf.keras.layers.Layer ):
    """In-graph GPT-2 tokenizer as a Keras layer, built on keras-nlp's BPE tokenizer.

    NOTE(review): the obfuscated original duplicated every parameter name
    (SyntaxError) and gave all methods one colliding name; canonical names are
    restored from the layer's visible behavior and Keras' serialization protocol.
    """

    def __init__( self, vocab, merges, max_length=None, pad_token_id=None ):
        """``vocab``: token->id map; ``merges``: BPE merge rules; ``max_length`` and
        ``pad_token_id`` optionally enable padding of the tokenized output."""
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length )

    @classmethod
    def from_tokenizer( cls, tokenizer, *args, **kwargs ):
        """Build from an existing (slow) GPT-2 tokenizer instance."""
        merges = [""" """.join(m ) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs )

    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, *init_inputs, **kwargs ):
        """Load the slow tokenizer from the hub/path, then wrap it."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs )
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs )

    @classmethod
    def from_config( cls, config ):
        """Inverse of ``get_config`` (Keras serialization protocol)."""
        return cls(**config )

    def get_config( self ):
        # everything needed to rebuild the layer via from_config
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call( self, x, max_length=None ):
        """Tokenize ``x``; when a pad token and max length are configured, pad/trim
        ids and attention mask to that length."""
        input_ids = self.tf_tokenizer(x )
        attention_mask = tf.ones_like(input_ids )
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 323 | 1 |
"""simple docstring"""
def a_ ( _lowercase ):
    """Return the rank of ``_lowercase`` (a list of numeric rows) via Gaussian elimination.

    The matrix is modified in place, matching the original implementation.
    Fixes two defects of the original: ``row -= 1`` inside a ``for`` loop was a
    no-op (so elimination was skipped after a row swap / column move), and an
    empty matrix raised ``IndexError``.

    >>> a_([[1, 0], [0, 1]])
    2
    >>> a_([[1, 2], [2, 4]])
    1
    """
    matrix = _lowercase
    rows = len(matrix )
    columns = len(matrix[0] ) if rows else 0
    rank = min(rows, columns )

    row = 0
    while row < rank:
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Find a non-zero element below the diagonal to swap rows with
            reduce = True
            for i in range(row + 1, rows ):
                if matrix[i][row] != 0:
                    matrix[i], matrix[row] = matrix[row], matrix[i]
                    reduce = False
                    break
            if reduce:
                # Whole column is zero: drop one from the rank, move the now-last
                # considered column into this position, and retry the same row.
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # In both cases stay on the same row (a while-loop makes the original
            # intent of "row -= 1" actually work).
    return rank
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 128 |
"""simple docstring"""
from __future__ import annotations
import math
def minimax( depth, node_index, is_max, scores, height ):
    """Return the optimal value of a full binary game tree via minimax.

    ``depth``/``node_index`` locate the current node, ``is_max`` says whose turn
    it is, ``scores`` holds the leaf payoffs, and ``height`` is the depth at
    which leaves live (``math.log(len(scores), 2)``).

    The obfuscated original gave all five parameters one name (a SyntaxError)
    and never flipped ``is_max`` on recursion; both are restored here.
    """
    if depth < 0:
        raise ValueError('''Depth cannot be less than 0''' )
    if len(scores ) == 0:
        raise ValueError('''Scores cannot be empty''' )
    # Leaf reached: its payoff is the score itself.
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height ),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height ),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )


# Backward-compatible alias for the obfuscated original name.
a_ = minimax
def main():
    """Demo: print the optimal minimax value for a fixed score list.

    NOTE(review): restored from the obfuscated original, whose locals shared one
    name (the score list was clobbered) and whose call passed undefined names;
    the maximiser moves first (``is_max=True``).
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 3_4423]
    height = math.log(len(scores ), 2 )
    print('''Optimal value : ''', end='''''' )
    print(minimax(0, 0, True, scores, height ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): expects module-level ``main``; the obfuscated file defines it
    # under another name — confirm the definition resolves.
    main()
| 128 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCAmelCase ( TestCasePlus ):
    """Slow end-to-end finetuning smoke test for a tiny BERT2BERT summarizer.

    NOTE(review): the obfuscated original collapsed every local into one name
    (so the model/config/datasets were clobbered) and inherited from an undefined
    base; locals and the ``TestCasePlus`` base (imported above) are restored.
    """

    @slow
    @require_torch
    def test_finetune_bertabert( self ):
        """Finetune tiny-BERT encoder-decoder on 1% of CNN/DailyMail for a few steps."""
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny' )
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased' )

        # generation config: share the encoder vocab, stop on [SEP], start on [CLS]
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128

        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]' )
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]' )

        # keep the run tiny
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512 )
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128 )
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask

            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            # pad positions are ignored by the loss
            batch['labels'] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels']
            ]
            batch['decoder_attention_mask'] = outputs.attention_mask

            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )

            return batch

        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True )

            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['article', 'highlights'], )
        train_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['article', 'highlights'], )
        val_dataset.set_format(
            type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy='steps', do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
| 50 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowerCamelCase : Tuple = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the model-input dict for BlenderbotSmall tests.

    Any mask not supplied is derived: attention masks are 1 wherever the token is
    not ``config.pad_token_id``; head masks default to all-ones.

    NOTE(review): the obfuscated original repeated one parameter name eight times
    (a SyntaxError); names are restored to match the caller inside this file.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    # NOTE: "decoder_attention_mask" deliberately reuses the encoder attention_mask,
    # mirroring the original helper's returned dict.
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


# Backward-compatible alias for the obfuscated original name.
lowercase_ = prepare_blenderbot_inputs_dict
class lowercase :
    """Model tester: builds a tiny BlenderbotSmall config + inputs and provides
    shared cached-decoding checks.

    NOTE(review): the obfuscated original duplicated parameter names (SyntaxError)
    and gave all methods one colliding name; method names are restored to match
    the call sites in the test class below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Random ids in [3, vocab_size) with a forced EOS (=2) on every row."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ), 3, self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64 )), -1 )
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,  # NOTE(review): flag value restored from upstream tester — confirm
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids )
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding with an init_cache + step-wise decode must match full decode."""
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype='''i4''' )

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=F"""Max diff is {diff}""" )

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as above, but with the real decoder attention mask padded to cache length."""
        max_decoder_length = 20
        model = model_class_name(config )

        encoder_outputs = model.encode(inputs_dict['''input_ids'''] )

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype='''i4''' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask )

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3, msg=F"""Max diff is {diff}""" )
@require_flax
class lowercase ( unittest.TestCase):
    """Standalone head tests for tiny BlenderbotSmall models.

    NOTE(review): the obfuscated original gave all four methods one colliding
    name and stored ``vocab_size`` under a mangled attribute the bodies never
    read; canonical names are restored.
    """

    # tiny vocabulary shared by all head tests below
    vocab_size = 99

    def _get_config_and_data(self):
        """Return (config, input_ids, batch_size) for a tiny model."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        """LM logits must have shape (batch, seq_len, vocab_size)."""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape, expected_shape )

    def test_lm_uneven_forward(self):
        """Encoder and decoder sequence lengths may differ."""
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64 )
        outputs = lm_model(input_ids=context, decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape, expected_shape )

    def test_shift_tokens_right(self):
        """Shifting moves the decoder-start token (2) to position 0 and consumes one pad."""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64 )
        shifted = shift_tokens_right(input_ids, 1, 2 )
        n_pad_before = np.equal(input_ids, 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted, 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape, input_ids.shape )
        self.assertEqual(n_pad_after, n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0], 2 ).all() )
@require_flax
class lowercase ( FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Standard Flax model-test harness for BlenderbotSmall.

    NOTE(review): the obfuscated original inherited from undefined names; the
    mixin bases are restored from this file's imports, and the colliding method
    names are restored to the canonical ``test_*`` names.
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        # NOTE(review): the tester class above was obfuscated to ``lowercase``;
        # this canonical name must match its actual definition to resolve.
        self.model_tester = FlaxBlenderbotSmallModelTester(self )

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict )

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict )

    def test_encode(self):
        """``encode`` must produce identical shapes with and without jax.jit."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class )
                model = model_class(config )

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs ):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ), len(outputs ) )
                for jitted_output, output in zip(jitted_outputs, outputs ):
                    self.assertEqual(jitted_output.shape, output.shape )

    def test_decode(self):
        """``decode`` must produce identical shapes with and without jax.jit."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['''input_ids'''], inputs_dict['''attention_mask'''] )

                prepared_inputs_dict = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ), len(outputs ) )
                for jitted_output, output in zip(jitted_outputs, outputs ):
                    self.assertEqual(jitted_output.shape, output.shape )

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
| 167 | 0 |
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _a ( ProcessorMixin ):
    """Bundles the TVLT image processor and audio feature extractor in one processor.

    NOTE(review): the obfuscated original duplicated parameter names (SyntaxError),
    never bound the sub-processor results, and inherited from an undefined name;
    ``ProcessorMixin`` (imported above) and canonical names are restored.
    """

    # names required by the ProcessorMixin protocol
    attributes = ['''image_processor''', '''feature_extractor''']
    image_processor_class = '''TvltImageProcessor'''
    feature_extractor_class = '''TvltFeatureExtractor'''

    def __init__( self, image_processor, feature_extractor ):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor )

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__( self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs ):
        """Route images to the image processor and audio to the feature extractor
        and merge the resulting dicts; at least one of ``images``/``audio`` is required."""
        if images is None and audio is None:
            raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict

    @property
    def model_input_names( self ):
        """Order-preserving, de-duplicated union of both sub-processors' input names."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 128 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
# Which seq2seq answer model to load ("bart" or the t5-small fallback).
MODEL_TYPE = """bart"""
# Whether to load the dense wiki40b FAISS index (needs GPU + local .dat files).
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True )
def load_models():
    """Load the question-retrieval model (optional) and the answer seq2seq model.

    Heavy I/O + CUDA. NOTE(review): restored from the obfuscated original whose
    locals all shared one name, making the returned tuple reference undefined
    variables.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
        qar_model = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
        qar_model = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
        sas_model = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
        save_dict = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
        sas_model.load_state_dict(save_dict['''model'''] )
        sas_model = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''' )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True )
def load_indexes():
    """Load wiki40b passages + dense GPU FAISS index (optional) and the ES client.

    NOTE(review): restored from the obfuscated original whose locals all shared
    one name, making the returned tuple reference undefined variables.
    """
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wikiaab_passages = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''' )['''train''']
        wikiaab_passage_reps = np.memmap(
            '''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 128), )
        wikiaab_index_flat = faiss.IndexFlatIP(128 )
        wikiaab_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wikiaab_index_flat )
        wikiaab_gpu_index_flat.add(wikiaab_passage_reps )  # TODO fix for larger GPU
    else:
        wikiaab_passages, wikiaab_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
    return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True )
def load_train_data():
    """Load the ELI5 training split and a dense FAISS index over its questions.

    NOTE(review): restored from the obfuscated original whose locals all shared
    one name; return names match the references in the nearest-neighbour helper.
    """
    elia = datasets.load_dataset('''eli5''', name='''LFQA_reddit''' )
    elia_train = elia['''train_eli5''']
    elia_train_q_reps = np.memmap(
        '''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 128) )
    eli5_train_q_index = faiss.IndexFlatIP(128 )
    eli5_train_q_index.add(elia_train_q_reps )
    return (elia_train, eli5_train_q_index)
# Module-level singletons shared by the query helpers below. The obfuscated
# original unpacked each tuple into one repeated name, losing all but the last.
wikiaab_passages , wikiaab_gpu_index_flat , es_client = load_indexes()
qar_tokenizer , qar_model , sas_tokenizer , sas_model = load_models()
elia_train , eli5_train_q_index = load_train_data()
def find_nearest_training( question , n_results=10 ):
    """Return the ``n_results`` ELI5 training examples nearest to ``question``.

    NOTE(review): the obfuscated original repeated one parameter name (SyntaxError)
    and named every helper ``a_`` (mutual shadowing); the canonical name is restored.
    """
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model )
    _, nn_ids = eli5_train_q_index.search(q_rep, n_results )
    nn_examples = [elia_train[int(i )] for i in nn_ids[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    """Retrieve supporting passages for `question` and build the seq2seq input document.

    Returns (question_doc, support_list) where support_list holds
    (article_title, section_title, score, passage_text) tuples.
    """
    if source == "none":
        # No retrieval: an empty document made of 10 <P> separators.
        support_doc, hit_lst = (' <P> '.join(['' for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            # Max-inner-product search over precomputed passage embeddings.
            # NOTE(review): argument order follows the upstream helper signature — confirm.
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question,
                es_client,
                index_name='english_wiki40b_snippets_100w',
                n_results=n_results,
            )
    support_list = [
        (res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
    ]
    question_doc = 'question: {} context: {}'.format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        # Tensors and tokenizers cannot be hashed by streamlit: exclude them from the cache key.
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    } )
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    """Generate one long-form answer for `question_doc` with the seq2seq model.

    Returns (answer, support_list); `support_list` is the module-level global
    set by the main app flow before this is called — a quirk kept from upstream.
    """
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device='cuda:0',
        )[0]
    return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")

# Start sidebar: HuggingFace logo header rendered as raw HTML.
header_html = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
header_full = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)

# Sidebar: what to show (answer, documents, nearest training example, everything).
action_list = [
    """Answer the question""",
    """View the retrieved document only""",
    """View the most similar ELI5 question and answer""",
    """Show me everything, please!""",
]
demo_options = st.sidebar.checkbox("""Demo options""")
if demo_options:
    action_st = st.sidebar.selectbox(
        """""",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        """""",
        ["""Show full text of passages""", """Show passage section titles"""],
        index=0,
    )
    show_passages = show_type == """Show full text of passages"""
else:
    action = 3
    show_passages = True

# Sidebar: retrieval source/index choices.
retrieval_options = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
    retriever_info = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
    index_type = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
    wiki_source = """wiki40b"""
    index_type = """dense"""

# Generation defaults, optionally overridden by the sidebar controls below.
sampled = """beam"""
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("""Generation options""")
if generate_options:
    generate_info = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
    min_len = st.sidebar.slider(
        """Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        """Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            """Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            """Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    """<MY QUESTION>""",
    """How do people make chocolate?""",
    """Why do we get a fever when we are sick?""",
    """How can different animals perceive different colors?""",
    """What is natural language processing?""",
    """What's the best way to treat a sunburn?""",
    """What exactly are vitamins ?""",
    """How does nuclear energy provide electricity?""",
    """What's the difference between viruses and bacteria?""",
    """Why are flutes classified as woodwinds when most of them are made out of metal ?""",
    """Why do people like drinking coffee even though it tastes so bad?""",
    """What happens when wine ages? How does it make the wine taste better?""",
    """If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
    """How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
    """How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    """What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("""Enter your question here:""", """""")
else:
    question = question_s

if st.button("""Show me!"""):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            # Merge dense and sparse hits, de-duplicating while keeping order, capped at 10.
            _, support_list_dense = make_support(question, source=wiki_source, method="""dense""", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == """sampled"""),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("""### The model generated answer is:""")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
        for i, res in enumerate(support_list):
            wiki_url = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = """[{}]({})""".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(""" & """)
                sections = """ & """.join(
                    ["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
                )
            st.markdown(
                """{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    """> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            """--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
        )
        # Keep the top answer plus any other answer with score > 2.
        answers_st = [
            """{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
            for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
            if i == 0 or sc > 2
        ]
        st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))

disclaimer = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 128 | 1 |
"""Lazy import structure for the ConvBERT model (transformers-style __init__)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Base import structure: configuration and slow tokenizer are always importable.
_import_structure = {
    'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
    'tokenization_convbert': ['ConvBertTokenizer'],
}

# Fast tokenizer only when the `tokenizers` library is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_convbert_fast'] = ['ConvBertTokenizerFast']

# PyTorch modeling code only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_convbert'] = [
        'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvBertForMaskedLM',
        'ConvBertForMultipleChoice',
        'ConvBertForQuestionAnswering',
        'ConvBertForSequenceClassification',
        'ConvBertForTokenClassification',
        'ConvBertLayer',
        'ConvBertModel',
        'ConvBertPreTrainedModel',
        'load_tf_weights_in_convbert',
    ]

# TensorFlow modeling code only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_convbert'] = [
        'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFConvBertForMaskedLM',
        'TFConvBertForMultipleChoice',
        'TFConvBertForQuestionAnswering',
        'TFConvBertForSequenceClassification',
        'TFConvBertForTokenClassification',
        'TFConvBertLayer',
        'TFConvBertModel',
        'TFConvBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 324 |
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """Sort ``array[start:end]`` in place with insertion sort and return the array.

    ``end == 0`` means "up to the end of the array".
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements one slot right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted at ``index`` is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        # Swap the larger child up and continue sifting down.
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    """Sort ``array`` in place with heap sort and return it."""
    n = len(array)
    # Build a max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # Repeatedly move the current max to the end and re-heapify the prefix.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_a(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three values at the given indices (pivot selection).

    Named to match the existing call site in this file (originally median-of-3).
    """
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split index: elements left of it are <= pivot, right are >= pivot.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Introsort entry point: quicksort with heap-sort and insertion-sort fallbacks.

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    """
    if len(array) == 0:
        return array
    # Recursion budget before falling back to heap sort (was `math.loga`, a typo for log2).
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16  # below this size, insertion sort wins
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Recursive introsort over ``array[start:end]``.

    Quicksorts with a median-of-three pivot, switches to heap sort when the
    recursion budget ``max_depth`` is exhausted, and finishes small ranges
    (<= size_threshold) with insertion sort.
    """
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read a comma-separated list of numbers from stdin and print it sorted.
    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
| 324 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint names to their hosted config files.
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/beit-base-patch16-224-pt22k': (
        'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    """Configuration for BEiT models.

    Defaults mirror microsoft/beit-base-patch16-224-pt22k. Extra keyword
    arguments are forwarded to PretrainedConfig.
    """

    model_type = 'beit'

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT."""

    # Minimum torch version known to export BEiT correctly.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single image input with symbolic batch/channel/spatial axes.
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating exported outputs against PyTorch.
        return 1e-4
| 364 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias of SegformerImageProcessor kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Deprecation shim: warn, then delegate everything to SegformerImageProcessor.
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 334 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint names to their hosted config files.
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    """Configuration for Swin Transformer V2 models.

    Defaults mirror microsoft/swinv2-tiny-patch4-window8-256.
    """

    model_type = 'swinv2'

    # Map the common config attribute names onto Swin's internal names.
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 173 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint names to their hosted config files.
VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    """Configuration for ViT-MAE (masked autoencoder) models.

    Defaults mirror facebook/vit-mae-base; decoder_* parameters configure the
    lightweight reconstruction decoder used only during pre-training.
    """

    model_type = 'vit_mae'

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 173 | 1 |
'''simple docstring'''
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
# Help text shown for the `accelerate config` command.
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    """Prompt the user for a compute environment and return the resulting config object."""
    compute_environment = _ask_options(
        'In which compute environment are you running?',
        ['This machine', 'AWS (Amazon SageMaker)'],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Build the argparse parser for `accelerate config`.

    When `subparsers` is given, registers `config` as a subcommand; otherwise
    returns a standalone parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('config', description=description)
    else:
        parser = argparse.ArgumentParser('Accelerate config command', description=description)

    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
    )

    if subparsers is not None:
        # Dispatch target used by the top-level accelerate CLI.
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the interactive prompts and save the resulting configuration to disk.

    Saves to `args.config_file` when given, otherwise to the default YAML path
    inside the accelerate cache directory (created if needed).
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('.json'):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(F"""accelerate configuration saved at {config_file}""")
def main():
    """Entry point for running `accelerate config` as a standalone script."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 351 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """Builds small XGLM configs and dummy inputs for the common TF model tests."""

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model  # common tests read `hidden_size`
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        # Clip ids into [0, 3] so the special bos/eos/pad ids actually occur in the batch.
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model/pipeline tests for XGLM."""

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # NOTE(review): the source obfuscated the names of these three boolean flags;
    # these are the conventional ones for this tester — confirm against the mixin.
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests for TF XGLM generation against ``facebook/xglm-564M``."""

    @slow
    def test_lm_generate_xglm(self, UpperCamelCase__=True):
        """Greedy decoding must reproduce the reference continuation for "The dog".

        UpperCamelCase__: when truthy, assert on the generated ids (kept so callers
        can smoke-test generation without checking exact outputs).
        """
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        # BUG FIX: ``tf.intaa`` does not exist; the intended dtype is ``tf.int32``.
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        # Greedy (no sampling) so the output is deterministic.
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if UpperCamelCase__:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        """Seeded sampling on CPU must reproduce a fixed reference string."""
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0' ):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        expected_output_str = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, expected_output_str)

    @slow
    def test_batch_generation(self):
        """Left-padded batched generation must match per-sentence generation."""
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        # BUG FIX: the padding side must be set on the tokenizer; it was previously
        # assigned to a throwaway local.
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors='tf' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 249 | 0 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
# Module-level logger; the scoring helpers below log through this name.
logger = datasets.logging.get_logger(__name__)

# BibTeX citations for CoVal and the coreference metrics it implements.
# BUG FIX: these four module constants had lost their names (each assignment
# overwrote the same placeholder); the metric class below reads
# ``_CITATION``/``_DESCRIPTION``/``_KWARGS_DESCRIPTION``.
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''

# Human-readable description shown in the metric card.
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column	Type	Description
1	Document ID	This is a variation on the document filename
2	Part number	Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3	Word number
4	Word itself	This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5	Part-of-Speech
6	Parse bit	This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7	Predicate lemma	The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8	Predicate Frameset ID	This is the PropBank frameset ID of the predicate in Column 7.
9	Word sense	This is the word sense of the word in Column 3.
10	Speaker/Author	This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11	Named Entities	These columns identifies the spans representing various named entities.
12:N	Predicate Arguments	There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N	Coreference	Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''

# Argument / return-value documentation injected into the metric docstring.
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one,
        are considered as singletons. The default evaluation mode will include
        singletons in evaluations if they are included in the key or the system files.
        By setting \'keep_singletons=False\', all singletons in the key and system files
        will be excluded from the evaluation.
    NP_only: Most of the recent coreference resolvers only resolve NP mentions and
        leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
    min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
        Minimum spans are determined using the MINA algorithm.
Returns:
    \'mentions\': mentions
    \'muc\': MUC metric [Vilain et al, 1995]
    \'bcub\': B-cubed [Bagga and Baldwin, 1998]
    \'ceafe\': CEAFe [Luo et al., 2005]
    \'lea\': LEA [Moosavi and Strube, 2016]
    \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
    >>> coval = datasets.load_metric(\'coval\')
    >>> words = [\'bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -\',
    ... \'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)\',
    ... \'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)\',
    ... \'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *        (ARG2*         *       -\',
    ... \'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)      -\',
    ... \'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -\']
    >>> references = [words]
    >>> predictions = [words]
    >>> results = coval.compute(predictions=predictions, references=references)
    >>> print(results) # doctest:+ELLIPSIS
    {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def _lowerCAmelCase(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
) -> dict:
    """Parse key (gold) and system CoNLL lines into CoVal's per-document structures.

    Returns a dict mapping ``doc`` to a tuple of
    ``(key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)``
    as consumed by ``evaluator.evaluate_documents``.

    BUG FIX: the previous version declared duplicate parameter names (a
    SyntaxError) and never stored the result tuple under ``doc``.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # NOTE: parse trees are deliberately taken from the *key* (gold) annotation.
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            'Number of removed nested coreferring mentions in the key '
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" )
        logger.info(
            'Number of resulting singleton clusters in the key '
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" )
    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            'files, respectively' )
    return doc_coref_infos


# Restore the readable public name used by ``evaluate`` below.
get_coref_infos = _lowerCAmelCase
def _lowerCAmelCase(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span) -> dict:
    """Score system lines against key lines with every ``(name, metric)`` in ``metrics``.

    Returns a dict with ``{name}/recall``, ``{name}/precision`` and ``{name}/f1``
    per metric, plus ``conll_score`` (mean F1 of MUC, B-cubed and CEAFe, x100)
    when all three are present.

    BUG FIX: duplicate parameter names (a SyntaxError) and undefined body names
    restored; ``logger.info`` previously received multiple positional arguments
    without format placeholders, which breaks ``%``-style formatting.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10)
            + f"Recall: {recall * 100:.2f}"
            + f" Precision: {precision * 100:.2f}"
            + f" F1: {f1 * 100:.2f}"
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({'conll_score': conll})

    return output_scores


# Restore the readable public name used by the metric class below.
evaluate = _lowerCAmelCase
def _lowerCAmelCase(key_lines) -> bool:
    """Return True iff the key (gold) CoNLL lines carry parse annotation.

    The first non-comment line decides: if it has more than 6 columns and its
    6th column (index 5, the "parse bit") is not "-", gold parses are present.
    Short lines terminate the scan immediately.

    BUG FIX: the parameter had been renamed away from ``key_lines`` while the
    body still read ``key_lines`` (NameError); return annotation corrected to
    ``bool``.
    """
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith('#' ):
            if len(line.split() ) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


# Restore the readable public name (kept alongside the mangled def name).
check_gold_parse_annotation = _lowerCAmelCase
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class a ( datasets.Metric ):
    """CoVal coreference metric: MUC, B-cubed, CEAFe, LEA and the CoNLL score."""

    # NOTE(review): upstream these hooks are named ``_info`` and ``_compute``
    # (the ``datasets.Metric`` contract); restored so both are reachable —
    # previously the two methods shared one mangled name and shadowed each other.
    def _info(self):
        """Describe the metric's inputs, citation and documentation."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' ) ),
                    'references': datasets.Sequence(datasets.Value('string' ) ),
                } ),
            codebase_urls=['https://github.com/ns-moosavi/coval'],
            reference_urls=[
                'https://github.com/ns-moosavi/coval',
                'https://www.aclweb.org/anthology/P16-1060',
                'http://www.conll.cemantix.org/2012/data.html',
            ],
        )

    def _compute(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Compute all CoVal scores of ``predictions`` against ``references``.

        Raises NotImplementedError when ``min_span`` is requested but the
        references carry no gold parse annotation.

        BUG FIX: the previous signature declared duplicate parameter names
        (a SyntaxError) and the body referenced undefined names.
        """
        allmetrics = [
            ('mentions', evaluator.mentions),
            ('muc', evaluator.muc),
            ('bcub', evaluator.b_cubed),
            ('ceafe', evaluator.ceafe),
            ('lea', evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score
| 230 |
# Doomsday "anchor dates" per month, taken modulo 7.  In a leap year the
# January/February anchors shift (Jan 4 / Feb 29 instead of Jan 3 / Feb 28).
# BUG FIX: the two lists and the name table had lost their names (all three
# were assigned to the same placeholder, each overwriting the last).
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}


def _lowerCAmelCase(year: int, month: int, day: int) -> str:
    """Return the week-day name of a date using Conway's Doomsday algorithm.

    >>> get_week_day(2020, 10, 24)
    'Saturday'
    >>> get_week_day(2017, 10, 24)
    'Tuesday'
    >>> get_week_day(2000, 1, 1)
    'Saturday'
    >>> get_week_day(1900, 1, 1)
    'Monday'
    """
    # minimal input check (note: ``assert`` is stripped under ``python -O``):
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    # A century year (centurian == 0) is a leap year only when divisible by 400.
    # BUG FIX: the non-leap test must be ``year % 400 != 0`` — the previous
    # ``== 0`` inverted the Gregorian rule (it made 2000 common and 1900 leap).
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


# Restore the readable public name (the mangled def name is kept for compatibility).
get_week_day = _lowerCAmelCase


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger; the config classes below emit warnings through this name.
# BUG FIX: it was previously assigned to a placeholder while the classes read
# ``logger``.
logger = logging.get_logger(__name__)

# Checkpoint -> remote config-file map.  NOTE(review): restored to the upstream
# constant name; nothing in this chunk references it, so confirm against callers.
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __lowercase ( a_ ):
    """Configuration for the AltCLIP text encoder (XLM-R based text tower).

    Defaults reproduce the ``BAAI/AltCLIP`` text model.
    """

    # Read by the ``from_pretrained`` dispatch logic (``cls.model_type``).
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        """Store the text-model hyper-parameters.

        BUG FIX: the previous signature declared every parameter with the same
        name (a SyntaxError) and assigned each value to a throwaway local
        instead of ``self``, so the config carried no attributes.
        """
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


# Restore the readable public name (referenced by AltCLIPConfig below).
AltCLIPTextConfig = __lowercase
class __lowercase ( a_ ):
    """Configuration for the AltCLIP vision encoder (CLIP ViT tower).

    Defaults reproduce the ``BAAI/AltCLIP`` vision model.
    """

    # Read by the ``from_pretrained`` dispatch logic (``cls.model_type``).
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        """Store the vision-model hyper-parameters.

        BUG FIX: duplicate parameter names (a SyntaxError) restored and the
        values stored on ``self`` instead of a throwaway local.
        """
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def __A(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping a full AltCLIP config if given."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type" ) == "altclip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )

        return cls.from_dict(config_dict, **kwargs)


# Restore the readable public name (referenced by AltCLIPConfig below).
AltCLIPVisionConfig = __lowercase
class __lowercase ( a_ ):
    """Composite AltCLIP configuration holding a text and a vision sub-config."""

    # Read by ``to_dict`` / ``from_pretrained`` dispatch (``model_type``);
    # NOTE(review): the second attribute was mangled — upstream it is
    # ``is_composition = True``; confirm against the base-class contract.
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        """Build the composite config, reconciling legacy ``*_config_dict`` kwargs.

        BUG FIX: the merge results and warning messages were previously assigned
        to throwaway locals, so the reconciliation had no effect.
        """
        # Legacy kwargs take part in the merge below for backward compatibility.
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)

        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}

            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()

            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}

            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }

            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            f'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)

            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )

        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def __A(cls, text_config, vision_config, **kwargs):
        """Instantiate from separately-built text and vision configs."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def __A(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


# Restore the readable public name for external importers.
AltCLIPConfig = __lowercase
| 358 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( a_ , a_ , unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionPanoramaPipeline`` built from tiny dummy models."""

    # NOTE(review): these attributes previously all shared one mangled name;
    # restored to the mixin switch names the pipeline test mixins read.
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a dict of tiny sub-models (UNet, VAE, CLIP) for a throwaway pipeline.

        BUG FIX: method names previously collided and body names were undefined;
        restored per the in-class callers (``self.get_dummy_components()``).
        """
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs (height/width ``None`` to limit CPU memory).

        BUG FIX: the previous signature declared two parameters with the same
        name (a SyntaxError).
        """
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        """Default pipeline run should match the reference image slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        """Narrow the shared batch-consistency test to small batch sizes."""
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        """Run the shared single-vs-batch identity test with a looser tolerance."""
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        """A negative prompt should still produce the reference image slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        """Batching the panorama views must not change the output."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        """Swapping in the Euler-ancestral scheduler yields its own reference slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        # BUG FIX: the scheduler must replace the one inside ``components`` —
        # it was previously built and discarded.
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        """Swapping in the PNDM scheduler yields its own reference slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase(unittest.TestCase):
    """Integration tests for ``StableDiffusionPanoramaPipeline`` on real checkpoints.

    NOTE(review): every method in the original was named ``__A`` and so shadowed
    the previous one — only the last definition survived and no test was ever
    collected. Methods now carry unique ``test_*`` names; bodies reconstruct the
    intent evidenced by the surviving expressions (``pipe``, ``image_slice`` …).
    """

    def tearDown(self):
        """Release CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        """Return reproducible call kwargs for the pipeline."""
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        """End-to-end run with the DDIM scheduler; pins a corner pixel slice."""
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)  # assumes torch_device is imported at file top -- confirm
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # Panorama output is 4x wider than it is tall.
        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        """Same run with the LMS discrete scheduler swapped in."""
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)
        # With only 3 LMS steps the reference corner slice is all zeros.
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        """Verify the latents handed to the step callback at steps 1 and 2."""
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        # 3 inference steps -> 3 callback invocations.
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        """Run with sequential CPU offload and bound peak GPU memory."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.2 GB is allocated
        assert mem_bytes < 5.5 * 10**9
| 66 | 0 |
"""Pinned dependency table: pip package name -> requirement specifier.

NOTE(review): tables of this shape are normally auto-generated from setup.py
extras -- confirm the generator before hand-editing any pin below.
"""
# Mapping consumed elsewhere to resolve exact install requirements.
UpperCamelCase__ : Union[str, Any] = {
    '''Pillow''': '''Pillow<10.0.0''',
    '''accelerate''': '''accelerate>=0.20.3''',
    '''av''': '''av==9.2.0''',
    '''beautifulsoup4''': '''beautifulsoup4''',
    '''black''': '''black~=23.1''',
    '''codecarbon''': '''codecarbon==1.2.0''',
    '''cookiecutter''': '''cookiecutter==1.7.3''',
    '''dataclasses''': '''dataclasses''',
    '''datasets''': '''datasets!=2.5.0''',
    '''decord''': '''decord==0.6.0''',
    '''deepspeed''': '''deepspeed>=0.9.3''',
    '''diffusers''': '''diffusers''',
    '''dill''': '''dill<0.3.5''',
    '''evaluate''': '''evaluate>=0.2.0''',
    '''fairscale''': '''fairscale>0.3''',
    '''faiss-cpu''': '''faiss-cpu''',
    '''fastapi''': '''fastapi''',
    '''filelock''': '''filelock''',
    '''flax''': '''flax>=0.4.1,<=0.7.0''',
    '''ftfy''': '''ftfy''',
    '''fugashi''': '''fugashi>=1.0''',
    '''GitPython''': '''GitPython<3.1.19''',
    '''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
    '''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
    '''importlib_metadata''': '''importlib_metadata''',
    '''ipadic''': '''ipadic>=1.0.0,<2.0''',
    '''isort''': '''isort>=5.5.4''',
    '''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
    '''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
    '''jieba''': '''jieba''',
    '''kenlm''': '''kenlm''',
    '''keras-nlp''': '''keras-nlp>=0.3.1''',
    '''librosa''': '''librosa''',
    '''nltk''': '''nltk''',
    '''natten''': '''natten>=0.14.6''',
    '''numpy''': '''numpy>=1.17''',
    '''onnxconverter-common''': '''onnxconverter-common''',
    '''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
    '''onnxruntime''': '''onnxruntime>=1.4.0''',
    '''opencv-python''': '''opencv-python''',
    '''optuna''': '''optuna''',
    '''optax''': '''optax>=0.0.8,<=0.1.4''',
    '''packaging''': '''packaging>=20.0''',
    '''parameterized''': '''parameterized''',
    '''phonemizer''': '''phonemizer''',
    '''protobuf''': '''protobuf''',
    '''psutil''': '''psutil''',
    '''pyyaml''': '''pyyaml>=5.1''',
    '''pydantic''': '''pydantic<2''',
    '''pytest''': '''pytest>=7.2.0''',
    '''pytest-timeout''': '''pytest-timeout''',
    '''pytest-xdist''': '''pytest-xdist''',
    '''python''': '''python>=3.8.0''',
    '''ray[tune]''': '''ray[tune]''',
    '''regex''': '''regex!=2019.12.17''',
    '''requests''': '''requests''',
    '''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
    '''rjieba''': '''rjieba''',
    '''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
    '''ruff''': '''ruff>=0.0.241,<=0.0.259''',
    '''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
    '''sacremoses''': '''sacremoses''',
    '''safetensors''': '''safetensors>=0.3.1''',
    '''sagemaker''': '''sagemaker>=2.31.0''',
    '''scikit-learn''': '''scikit-learn''',
    '''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
    '''sigopt''': '''sigopt''',
    '''starlette''': '''starlette''',
    '''sudachipy''': '''sudachipy>=0.6.6''',
    '''sudachidict_core''': '''sudachidict_core>=20220729''',
    '''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
    '''tensorflow''': '''tensorflow>=2.6,<2.14''',
    '''tensorflow-text''': '''tensorflow-text<2.14''',
    '''tf2onnx''': '''tf2onnx''',
    '''timeout-decorator''': '''timeout-decorator''',
    '''timm''': '''timm''',
    '''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
    '''torch''': '''torch>=1.9,!=1.12.0''',
    '''torchaudio''': '''torchaudio''',
    '''torchvision''': '''torchvision''',
    '''pyctcdecode''': '''pyctcdecode>=0.4.0''',
    '''tqdm''': '''tqdm>=4.27''',
    '''unidic''': '''unidic>=1.0.2''',
    '''unidic_lite''': '''unidic_lite>=1.0.7''',
    '''urllib3''': '''urllib3<2.0.0''',
    '''uvicorn''': '''uvicorn''',
}
"""Greedy minimum-change calculator (Indian-currency style denominations)."""


def find_minimum_change(denominations: list[int], value: str | int) -> list[int]:
    """Return the coins/notes that greedily make change for ``value``.

    Fixes over the previous revision:
    - the function was unnamed correctly (the driver below calls
      ``find_minimum_change``) and its signature repeated one parameter name,
      which is a SyntaxError;
    - ``reversed(...)`` assumed the caller passed an ascending-sorted list;
      sorting descending makes the greedy walk correct for any input order.

    Args:
        denominations: available coin/note values (any order).
        value: amount to change; accepts an int or a numeric string.

    Returns:
        The denominations used, largest first; empty list when value is 0.
    """
    total_value = int(value)
    answer: list[int] = []
    # Largest denominations first so the greedy choice is well-defined.
    for denomination in sorted(denominations, reverse=True):
        denomination = int(denomination)
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer


# Backward-compatible alias for the previous (mangled) public name.
lowerCAmelCase_ = find_minimum_change
# Driver Code
if __name__ == "__main__":
    # NOTE(review): the previous revision bound every local to one mangled name
    # (``UpperCamelCase__``) while later lines read ``denominations``/``value``/
    # ``n``/``answer`` -- a guaranteed NameError. Restored the intended names.
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module-level logger for this configuration module.
lowerCAmelCase = logging.get_logger(__name__)
class _a(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the MaskFormer-Swin backbone.

    Fixes over the previous revision: the ``__init__`` signature repeated a
    single mangled parameter name (a SyntaxError), the base classes were
    mangled to ``A_``, and the class attributes were both bound to
    ``_lowercase`` although ``PretrainedConfig`` machinery requires them to be
    named ``model_type`` and ``attribute_map``.
    """

    model_type = "maskformer-swin"

    # Map external attribute names to the internal ones used by this config.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=None,
        num_heads=None,
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """Build the config; ``None`` for ``depths``/``num_heads`` selects the
        original defaults ``[2, 2, 6, 2]`` / ``[3, 6, 12, 24]`` (kept as
        ``None`` sentinels to avoid mutable default arguments)."""
        super().__init__(**kwargs)
        depths = [2, 2, 6, 2] if depths is None else depths
        num_heads = [3, 6, 12, 24] if num_heads is None else num_heads

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _a(PipelineTesterMixin, unittest.TestCase):
    """Fast (CPU) tests for ``ShapEImgaImgPipeline``.

    NOTE(review): in the previous revision every class attribute was bound to
    ``_lowercase`` and every property/method to ``lowerCamelCase_``, so they
    shadowed each other — yet the bodies reference the real names
    (``self.dummy_prior``, ``self.get_dummy_inputs`` …). Names restored
    accordingly; duplicate parameter names in ``get_dummy_inputs`` (a
    SyntaxError) fixed.
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        """Hidden size shared by the dummy CLIP encoder and the prior."""
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        """Tiny deterministic CLIP vision encoder."""
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        """Image processor matching the dummy encoder's expectations."""
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        """Tiny deterministic prior transformer."""
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        """Tiny deterministic ShapE renderer."""
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        """Assemble all dummy sub-models for the pipeline constructor."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1_024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Reproducible pipeline call kwargs for the given device/seed."""
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        # mps does not support torch.Generator, fall back to a global seed.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        """Full dummy-model run on CPU; pins one rendered frame slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        # 20 rendered frames of 32x32 RGB.
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        """num_images_per_prompt multiplies the batch dimension of the output."""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _a(unittest.TestCase):
    """Integration test for ``ShapEImgaImgPipeline`` against the real checkpoint.

    NOTE(review): both methods were previously named ``lowerCamelCase_`` and
    shadowed each other; restored ``tearDown`` and a collectable ``test_*`` name.
    """

    def tearDown(self):
        """Release CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        """Render a corgi image and compare against a stored reference array."""
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 93 | 0 |
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
# NOTE(review): presumably a debug/visualization toggle from the original test
# file -- nothing in this chunk reads it; confirm its intended name before use.
__a :Tuple = False


class _a(unittest.TestCase):
    """Placeholder for the fast (CPU) tests of this pipeline; intentionally empty.

    NOTE(review): a later class in this file reuses the name ``_a`` and shadows
    this one -- confirm the intended distinct class names.
    """

    pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    """Integration test for ``VersatileDiffusionImageVariationPipeline``.

    NOTE(review): renamed from ``_a`` — that name shadowed the earlier placeholder
    class; locals were also mangled while ``pipe``/``image_slice`` were still
    referenced, which made the body a NameError. Reconstructed accordingly.
    """

    def test_inference_image_variations(self):
        """Run image-variation on the reference car photo and pin a pixel slice."""
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuida
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
# NOTE(review): these constants were all bound to the mangled name ``__a`` while
# later code in this module references logger / SESSION_ID / HF_HUB_OFFLINE /
# DISABLE_TELEMETRY by their real names; restored the real bindings.
logger = get_logger(__name__)

# Jinja template used when generating model cards.
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / 'model_card_template.md'
# Random per-process id appended to telemetry user-agent strings.
SESSION_ID = uuida().hex
# Env-var toggles: any value in ENV_VARS_TRUE_VALUES disables network/telemetry.
HF_HUB_OFFLINE = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/'
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """Format a user-agent string describing the running environment.

    Renamed from the mangled ``__snake_case`` (which shadowed every other
    function in this module bearing the same name).

    Args:
        user_agent: extra fields to append, either a ``key/value`` dict or a
            pre-formatted string.

    Returns:
        The assembled user-agent string; telemetry fields are omitted when
        ``DISABLE_TELEMETRY`` or ``HF_HUB_OFFLINE`` is set.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None) -> str:
    """Return the fully-qualified ``namespace/model_id`` repo name on the Hub.

    Renamed from the mangled ``__snake_case``; the call site in
    ``create_model_card`` already uses ``get_full_repo_name``.

    Args:
        model_id: bare model name.
        organization: namespace to use; when ``None`` the token owner's
            username is resolved via ``whoami``.
        token: auth token; when ``None`` the locally stored token is used.
    """
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    """Render and save a README.md model card from the training arguments.

    Renamed from the mangled ``__snake_case``; the previous signature repeated
    one parameter name (a SyntaxError) and the duplicated ``adam_betaa``
    keywords are restored to ``adam_beta1``/``adam_beta2``.

    Args:
        args: training namespace; optional fields are probed with ``hasattr``.
        model_name: bare model name used to resolve the full repo name.

    Raises:
        ValueError: when Jinja is not installed.
    """
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    # Only the main process (or single-process runs) writes the card.
    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None) -> Optional[str]:
    """Extract the commit hash from a resolved cache filename.

    Renamed from the mangled ``__snake_case``.

    Args:
        resolved_file: path under a hub cache (``.../snapshots/<hash>/...``).
        commit_hash: known hash; returned unchanged when already provided.

    Returns:
        The commit hash, or ``None`` when it cannot be determined/validated.
    """
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    # Only accept strings that look like a real git commit hash.
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
# NOTE(review): these were bound to the mangled ``__a`` while being read back as
# ``hf_cache_home``/``old_diffusers_cache`` further down; restored the names.
hf_cache_home = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
old_diffusers_cache = os.path.join(hf_cache_home, 'diffusers')
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    """Move cached blob files to the new cache layout, leaving symlinks behind.

    Renamed from the mangled ``__snake_case`` (the module-level migration code
    below already calls ``move_cache()``); the previous signature repeated a
    single parameter name, which is a SyntaxError.

    Args:
        old_cache_dir: source cache root; defaults to ``old_diffusers_cache``.
        new_cache_dir: destination cache root; defaults to ``DIFFUSERS_CACHE``.
    """
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                # Keep the old path usable by pointing it at the moved blob.
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
# One-time cache migration executed at import: if the on-disk cache predates
# layout version 1, move it and then stamp the version file.
# NOTE(review): the previous revision bound each value to the mangled ``__a``
# while reading back ``cache_version_file``/``cache_version``/
# ``old_cache_is_not_empty`` -- a guaranteed NameError; names restored.
cache_version_file = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            trace = '\n'.join(traceback.format_tb(e.__traceback__))
            logger.error(
                F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
                'message and we will do our best to help.'
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, 'w') as f:
            f.write('1')
    except Exception:
        logger.warning(
            F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            'the directory exists and can be written to.'
        )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Optional[str] = None ):
"""simple docstring"""
if variant is not None:
A_ = weights_name.split("." )
A_ = splits[:-1] + [variant] + splits[-1:]
A_ = ".".join(__UpperCamelCase )
return weights_name
def __snake_case ( __UpperCamelCase : Optional[Any] ,*,
__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Any ,__UpperCamelCase : Tuple ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : str ,__UpperCamelCase : int ,__UpperCamelCase : Union[str, Any] ,__UpperCamelCase : int ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : Tuple ,__UpperCamelCase : Optional[int]=None ,):
"""simple docstring"""
A_ = str(__UpperCamelCase )
if os.path.isfile(__UpperCamelCase ):
return pretrained_model_name_or_path
elif os.path.isdir(__UpperCamelCase ):
if os.path.isfile(os.path.join(__UpperCamelCase ,__UpperCamelCase ) ):
# Load from a PyTorch checkpoint
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ):
A_ = os.path.join(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
return model_file
else:
raise EnvironmentError(
f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse("0.20.0" )
):
try:
A_ = hf_hub_download(
__UpperCamelCase ,filename=_add_variant(__UpperCamelCase ,__UpperCamelCase ) ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,)
warnings.warn(
f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,__UpperCamelCase ,)
return model_file
except: # noqa: E722
warnings.warn(
f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase ,__UpperCamelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase ,__UpperCamelCase )}\' so that the correct variant file can be added.''' ,__UpperCamelCase ,)
try:
# 2. Load model file as usual
A_ = hf_hub_download(
__UpperCamelCase ,filename=__UpperCamelCase ,cache_dir=__UpperCamelCase ,force_download=__UpperCamelCase ,proxies=__UpperCamelCase ,resume_download=__UpperCamelCase ,local_files_only=__UpperCamelCase ,use_auth_token=__UpperCamelCase ,user_agent=__UpperCamelCase ,subfolder=__UpperCamelCase ,revision=revision or commit_hash ,)
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '''
"listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
"login`." )
except RevisionNotFoundError:
raise EnvironmentError(
f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '''
"this model name. Check the model page at "
f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' )
except EntryNotFoundError:
raise EnvironmentError(
f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' )
except HTTPError as err:
raise EnvironmentError(
f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' )
except ValueError:
raise EnvironmentError(
f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'''
f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'''
f''' directory containing a file named {weights_name} or'''
" \nCheckout your internet connection or see how to run the library in"
" offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." )
except EnvironmentError:
raise EnvironmentError(
f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '''
"'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '''
f'''containing a file named {weights_name}''' ) | 312 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def __A ( a_ :int) -> list[int]:
if num <= 0:
__a : Dict = F"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(a_)
__a : Union[str, Any] = [True] * (num + 1)
__a : Tuple = []
__a : Optional[int] = 2
__a : List[str] = int(math.sqrt(a_))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(a_)
# Set multiples of start be False
for i in range(start * start , num + 1 , a_):
if sieve[i] is True:
__a : str = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
prime.append(a_)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip()))) | 355 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ``OnnxStableDiffusionImgaImgPipeline`` against a tiny
    test checkpoint, one test per supported scheduler.

    Fixes vs. the obfuscated original: the undefined base ``_UpperCamelCase`` is
    the imported ``OnnxPipelineTesterMixin``; every method was named
    ``_lowerCamelCase`` (shadowing each other) while the bodies reference
    ``self.get_dummy_inputs()`` and ``self.hub_checkpoint``.
    """

    # Tiny hub checkpoint keeps these CPU tests fast.
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Return deterministic pipeline kwargs seeded by ``seed``."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        # Wide tolerance: ONNX runtime results are not bit-reproducible.
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests against full pretrained checkpoints.

    Renamed from the obfuscated ``__lowercase``, which collided with the fast
    test class above and shadowed it at module scope.
    """

    @property
    def gpu_provider(self):
        """ONNX Runtime CUDA provider spec (referenced by the tests below)."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the obfuscated original set an unnamed attribute to
        # False; disabling memory-pattern optimization matches the upstream
        # diffusers test — confirm.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))

        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if ``number`` is a perfect square.

    The caller (``solution``) refers to this helper as ``is_sq``; the
    obfuscated original named all three functions ``__lowerCamelCase``.
    """
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x/x_den + y/y_den + z/z_den as a reduced (numerator, denominator).

    Referenced as ``add_three`` by ``solution``; parameter names restored from
    the obfuscated duplicate ``A__`` placeholders.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 35) -> int:
    """Project Euler 180: sum s(x, y, z) = x + y + z over all "golden triples"
    of rationals with numerator/denominator <= ``order``; returns the
    numerator + denominator of the total.

    The four branches handle the exponents n = 1, 2, -1, -2 — the only
    exponents for which x^n + y^n = z^n admits such rational solutions.
    Local names (``z_num``, ``z_den``, ``hcf``, ``unique_s``, ``total``)
    restored from the obfuscated single-name placeholders, which left every
    ``//= hcf`` line referencing an undefined variable.
    """
    unique_s: set = set()
    total = Fraction(0)

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(add_three(x_num, x_den, y_num, y_den, z_num, z_den))

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(add_three(x_num, x_den, y_num, y_den, z_num, z_den))

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        unique_s.add(add_three(x_num, x_den, y_num, y_den, z_num, z_den))

                    # n=-2 (the original comment repeated "n=2" here)
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            unique_s.add(add_three(x_num, x_den, y_num, y_den, z_num, z_den))

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
    # Removed the dataset-delimiter residue ("| 28 |") that followed this line
    # and broke parsing.
    print(f'''{solution() = }''')
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    """Row-wise cosine similarity matrix between two embedding batches.

    Fixes vs. the obfuscated original: the signature repeated ``A__`` three
    times (a SyntaxError), and both rows were normalized from the *first*
    argument. ``eps`` clips tiny norms to avoid division by zero.
    Returns a matrix of shape (rows(emb_1), rows(emb_2)).
    """
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    """Flax module that flags NSFW concepts in CLIP image embeddings.

    Renamed from the obfuscated ``SCREAMING_SNAKE_CASE``: the pretrained
    wrapper below references this exact name as its ``module_class``. The
    ``config``/``dtype`` dataclass fields and the ``setup`` attribute
    assignments are restored from the ``self.config`` / ``self.dtype`` /
    ``self.vision_model`` reads in the original body.
    """

    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        # NOTE(review): bias disabled on the projection to match the PyTorch
        # safety checker — confirm against upstream weights.
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param(
            "concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim)
        )
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    """Pretrained-model wrapper around ``FlaxStableDiffusionSafetyCheckerModule``.

    Renamed from the obfuscated ``SCREAMING_SNAKE_CASE`` (which collided with
    the module class above); the undefined base ``_a`` is the imported
    ``FlaxPreTrainedModel``. Class attributes restored to the names that base
    class and this body actually read (``self.module_class`` at __init__).
    """

    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)  # NHWC batch at CLIP's default resolution
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        """Initialize module parameters from a random normal CLIP input."""
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        # Incoming images are channels-first; the Flax vision tower expects NHWC.
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
'''simple docstring'''
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count simple paths from (row, col) to the bottom-right cell of ``grid``.

    Moves are up/down/left/right; cells equal to 1 are walls; ``visit`` tracks
    the cells on the current path so none is reused. Restored the recursive
    name (the body already called ``depth_first_search``) and the
    ``row_length``/``col_length``/``count`` locals the obfuscated version
    collapsed into one placeholder.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so other branches may pass through this cell.
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    # Removed the dataset-delimiter residue that followed doctest.testmod()
    # and broke parsing.
    import doctest

    doctest.testmod()
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class __SCREAMING_SNAKE_CASE(TestCase):
    """Unit tests for ``TypedSequence`` type inference and coercion.

    Fixes vs. the obfuscated original: the undefined base ``lowerCamelCase_``
    is the imported ``TestCase``; every method shared the name
    ``_UpperCamelCase`` (so only the last survived); ``snake_case_`` stood for
    ``ValueError`` / ``cast_to_python_objects``; ``pa.intaa`` is not a real
    pyarrow name (``int64`` for inferred ints, ``int32`` for Value("int32")).
    """

    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        # Passing an explicit pa type alongside TypedSequence is rejected.
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        # try_type falls back to inference instead of raising.
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=ArrayaD((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, ArrayaDExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=ArrayaD((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_image_does_not_optimize_list_casting(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    """Assert that ``output`` (a pa.Buffer or a file path) holds the canonical
    two-row table split into ``expected_num_chunks`` record batches.

    Restored name: every test below already calls ``_check_output``.
    """
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    """ArrowWriter.write per example, with and without an explicit schema.

    Parameter names restored from the parametrize ids; the obfuscated version
    repeated one parameter name (SyntaxError) and collapsed both fields
    variants to the nonexistent ``pa.intaa``.
    """
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}  # inferred schema
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    """Writing with a Features object must propagate its schema and metadata."""
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    # round-trip: the schema must decode back to the original features
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    """An unhashable key must raise InvalidKeyError when check_duplicates is on.

    InvalidKeyError is imported at the top of this file and was otherwise
    unused; the obfuscated version raised an undefined placeholder.
    """
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    """Writing two examples under the same key must raise DuplicatedKeysError."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()
@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    """Distinct keys write cleanly and produce the canonical two-row table."""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    """write_batch of a full batch plus an empty batch (the empty one is a no-op)."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}  # inferred schema
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    """write_table of a pre-built pyarrow Table."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}  # inferred schema
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    """write_row with two single-row Tables."""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}  # inferred schema
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    """ArrowWriter writing to a real file path instead of an in-memory stream."""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    """Strip nested list types and return the innermost pyarrow value type.

    Restored name: the body already recurses via ``get_base_dtype`` and the
    tests below call it by that name.
    """
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type
def change_first_primitive_element_in_list(lst, value):
    """Replace, in place, the first non-list leaf of an arbitrarily nested list.

    Restored name (the body already recurses via it) and the mangled
    ``isinstance`` target, which must be ``list`` to descend into nesting.
    """
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    """optimized_int_type overrides the inferred int64 base dtype at any nesting depth."""
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    """Well-known column names get downcast dtypes; values out of range fall back to int64.

    The obfuscated ``pa.inta``/``pa.intaa`` are restored to int8/int32/int64,
    matching the downcasts OptimizedTypedSequence applies per column.
    """
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    """The writer's stream must be closed on both clean exit and exception exit."""
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    """Writing through an fsspec filesystem (the ``mockfs`` fixture) via storage_options."""
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    """ParquetWriter round-trip: write two examples, read them back with pq.read_table."""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    """embed_local_files inlines image bytes into the parquet file; otherwise only the path is stored."""
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """ArrowWriter must drop the nullable=False flag from inferred schemas.

    The obfuscated version passed an undefined placeholder for ``nullable``;
    the point of the test requires it to be False. Also removed the trailing
    dataset-delimiter residue that broke parsing.
    """
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class __a ( unittest.TestCase ):
    """Helper that generates BridgeTower image-processor kwargs and computes
    the height/width the processor is expected to produce for given inputs.

    NOTE(review): both helper methods below were renamed by tooling to the
    same identifier ``SCREAMING_SNAKE_CASE__``, so the second definition
    shadows the first; callers expecting ``prepare_image_processor_dict`` /
    ``get_expected_values`` will not find them -- confirm the intended names.
    """

    def __init__( self , parent , do_resize = True , size = None , size_divisor = 32 , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , do_center_crop = True , image_mean = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , do_pad = True , batch_size=7 , min_resolution=30 , max_resolution=400 , num_channels=3 , ):
        # Parameter names restored: the mangled signature repeated one name
        # for every argument, which is a SyntaxError in Python.
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {'shortest_edge': 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def SCREAMING_SNAKE_CASE__ ( self ):
        """Return the kwargs used to instantiate a BridgeTower image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def SCREAMING_SNAKE_CASE__ ( self , image_inputs , batched=False ):
        """Compute the (height, width) the processor should output.

        Mirrors the processor's shortest-edge resize (capped at 1333/800 of
        the target size) followed by rounding down to a multiple of
        ``size_divisor``.  Fix: the mangled body read ``w``/``h``/``newh``/
        ``neww``/``expected_values`` and ``self.get_expected_values`` without
        ever binding them (NameError); the locals are restored below.
        """
        if not batched:
            size = self.size['shortest_edge']
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w , h )
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # Cap the longer edge at the same ratio the processor uses.
            max_size = int((1333 / 800) * size )
            if max(newh , neww ) > max_size:
                scale = max_size / max(newh , neww )
                newh = newh * scale
                neww = neww * scale
            newh, neww = int(newh + 0.5 ), int(neww + 0.5 )
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                # Recurse through the unbatched branch one image at a time.
                # The def was renamed by tooling, so the recursion must use
                # the method's current name to resolve.
                expected_height, expected_width = self.SCREAMING_SNAKE_CASE__([image] )
                expected_values.append((expected_height, expected_width) )
            # Batched output is padded to the largest height/width in the batch.
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __a ( __UpperCamelCase , unittest.TestCase ):
    # Test suite for the BridgeTower image processor: checks constructor
    # properties and that PIL / numpy / torch inputs all produce pixel_values
    # of the expected padded shape.
    # NOTE(review): every test method below was renamed by tooling to the same
    # identifier, so each definition shadows the previous one and only the
    # last (the torch-tensor test) remains bound; the original ``test_*``
    # names need restoring for unittest discovery.
    __lowercase : Union[str, Any] = BridgeTowerImageProcessor if is_vision_available() else None
    def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
        '''Build the shared tester fixture (presumably the original ``setUp``).

        NOTE(review): ``BridgeTowerImageProcessingTester`` is not defined in
        this module (the tester class above was renamed to ``__a``), and the
        result is bound to a local instead of ``self.image_processor_tester``
        -- confirm against the upstream test file.
        '''
        lowercase__: List[Any] = BridgeTowerImageProcessingTester(self )
    @property
    def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
        '''Kwargs dict used to instantiate the image processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        '''The processor must expose all of its configuration attributes.'''
        lowercase__: List[Any] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase__ , 'image_mean' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , 'image_std' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , 'do_normalize' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , 'do_resize' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , 'size' ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , 'size_divisor' ) )
    def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
        '''Intentionally empty placeholder (original test skipped).'''
        pass
    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        '''PIL inputs: single image and batch produce the expected shapes.'''
        # Initialize image processor
        lowercase__: Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase__: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , Image.Image )
        # Test not batched input
        lowercase__: str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowercase__ , lowercase__: List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase__: List[str] = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
        lowercase__ , lowercase__: Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
        '''numpy inputs: single image and batch produce the expected shapes.'''
        # Initialize image processor
        lowercase__: List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase__: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
        # Test not batched input
        lowercase__: List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowercase__ , lowercase__: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase__: int = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
        lowercase__ , lowercase__: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
        '''torch inputs: single image and batch produce the expected shapes.'''
        # Initialize image processor
        lowercase__: List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase__: Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
        # Test not batched input
        lowercase__: Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        lowercase__ , lowercase__: Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase__: int = image_processing(lowerCAmelCase__ , return_tensors='pt' ).pixel_values
        lowercase__ , lowercase__: List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
| 196 |
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
__lowerCAmelCase = logging.get_logger(__name__)
# NOTE(review): this rebinds the same mangled name as the logger above, so the
# logger object is lost -- the two constants originally had distinct names.
# Map: released checkpoint id -> URL of its hosted config.json.
__lowerCAmelCase = {
    '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __a ( __UpperCamelCase ):
    """Configuration for Data2VecAudio models.

    Holds the encoder hyper-parameters, the convolutional feature-extractor
    layout, SpecAugment masking settings, and the CTC / adapter / XVector
    head options.

    Parameter names restored: the mangled signature repeated one identifier
    for every argument, which is a SyntaxError in Python; the names below
    follow the attribute assignments in the body.
    """
    __lowercase : Optional[int] = 'data2vec-audio'

    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_activation = feat_extract_activation
        # Conv layout is stored as lists so it can be serialized to JSON.
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.conv_pos_kernel_size = conv_pos_kernel_size
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # All three conv descriptions must agree on the number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
                F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        """Overall downsampling factor of the conv feature extractor
        (product of all conv strides)."""
        return math.prod(self.conv_stride )
| 196 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
a_ = logging.get_logger(__name__)
@add_end_docstrings(lowerCamelCase )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
    """Zero-shot audio classification: score free-form candidate text labels
    against an audio clip with an audio/text dual encoder.

    Method and parameter names restored: the mangled source defined the four
    pipeline hooks under one duplicated identifier (so only the last
    survived) and repeated parameter names, which is a SyntaxError.  The
    hook names below (``_sanitize_parameters`` / ``preprocess`` /
    ``_forward`` / ``postprocess``) are the ones the ``Pipeline`` base class
    (imported above) invokes.
    """

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework != "pt":
            raise ValueError(F"The {self.__class__} is only available in PyTorch." )
        # No specific FOR_XXX available yet

    def __call__( self , audios , **kwargs ):
        """Classify the given audio(s); see ``preprocess`` for accepted forms."""
        return super().__call__(audios , **kwargs )

    def _sanitize_parameters( self , **kwargs ):
        # Route user kwargs to the preprocess step; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        """Load the audio (URL, local path, raw bytes or 1-D ndarray), extract
        features, and tokenize one hypothesis sentence per candidate label."""
        if isinstance(audio , str ):
            if audio.startswith('http://' ) or audio.startswith('https://' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , 'rb' ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError('We expect a numpy ndarray as input' )
        if len(audio.shape ) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline' )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt' )
        inputs['candidate_labels'] = candidate_labels
        # One text hypothesis per label, e.g. "This is a sound of a dog.".
        # Fix: the mangled comprehension formatted the wrong name instead of
        # the loop variable.
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _forward( self , model_inputs ):
        """Run the text hypotheses and audio features through the model once."""
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess( self , model_outputs ):
        """Softmax the per-label logits and return labels sorted by score."""
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.' )
        # Fix: the mangled sort key ``lambda __lowercase: -x[0]`` referenced an
        # undefined name ``x``; sort by descending score.
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
a_ = logging.get_logger(__name__)
# NOTE(review): this rebinds the same mangled name as the logger above, so the
# logger object is lost -- the two constants originally had distinct names.
# Map: released checkpoint id -> URL of its hosted config.json.
a_ = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
    """Configuration for BioGPT models (vocabulary, transformer sizes,
    dropout, and generation-related token ids).

    Parameter names restored: the mangled signature repeated one identifier
    for every argument, which is a SyntaxError in Python; the names below
    follow the attribute assignments in the body.
    """
    snake_case_ = """biogpt"""

    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        # Special-token ids are consumed by the base-class constructor.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
'''simple docstring'''
def one_pence() -> int:
    """Ways to finish an amount using only 1p coins: always exactly one."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make ``x`` pence using coins of value <= 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make ``x`` pence using coins of value <= 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make ``x`` pence using coins of value <= 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make ``x`` pence using coins of value <= 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make ``x`` pence using coins of value <= 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make ``x`` pence using coins of value <= 100p."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make ``x`` pence using all UK coins up to 200p."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    """Project Euler 31: number of ways to make ``x`` pence (default 2 pounds).

    Function names restored: every definition was renamed to one duplicated
    identifier (so earlier defs were shadowed) while the bodies still called
    ``one_pence`` ... ``two_pound`` and read ``x``, causing NameErrors.
    """
    return two_pound(x)


if __name__ == "__main__":
    print(solution(int(input().strip())))
| 125 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class __a :
    """Builds tiny DPR configs/inputs and runs shape checks for the three TF
    DPR model heads.

    Names restored: the mangled source repeated one identifier for every
    ``__init__`` argument (a SyntaxError), renamed all methods to one
    duplicated name (shadowing each other), and used an invalid annotated
    tuple-unpacking form.  The method names below match the call sites in
    the test class that follows (``prepare_config_and_inputs``,
    ``create_and_check_dpr_*``).
    """

    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , projection_dim=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim

    def prepare_config_and_inputs( self ):
        """Create a small DPR config plus random ids/masks/labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = BertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        # DPR wraps a BERT config and adds a projection dimension.
        config = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_dpr_context_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Pooler output must be (batch, projection_dim or hidden_size)."""
        model = TFDPRContextEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_question_encoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Pooler output must be (batch, projection_dim or hidden_size)."""
        model = TFDPRQuestionEncoder(config=config )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )

    def create_and_check_dpr_reader( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Reader produces per-token start/end logits and one relevance logit."""
        model = TFDPRReader(config=config )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )

    def prepare_config_and_inputs_for_common( self ):
        """Config plus the minimal inputs dict used by the common model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict


# Backward-compatible alias: the test class below instantiates the tester
# under this name.
TFDPRModelTester = __a
@require_tf
class __a (lowerCamelCase , lowerCamelCase , unittest.TestCase ):
    """Model/pipeline test suite for the three TF DPR heads.

    Names restored: the mangled source bound all six class attributes and all
    six methods to duplicated identifiers, so only the last of each survived.
    The attribute names below (``all_model_classes`` etc.) are the ones the
    imported TFModelTesterMixin/PipelineTesterMixin read, and ``test_*``
    method names are required for unittest discovery.
    """
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}
    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        self.model_tester = TFDPRModelTester(self )
        # Fix: the mangled call passed an undefined name as ``config_class``.
        self.config_tester = ConfigTester(self , config_class=DPRConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_dpr_context_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs )

    def test_dpr_question_encoder_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs )

    def test_dpr_reader_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name )
            self.assertIsNotNone(model )
        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class __a (unittest.TestCase ):
    # Slow integration test: embeds a fixed tokenized question with the
    # pretrained single-nq DPR question encoder and compares the first ten
    # values of the pooled embedding against a recorded reference slice.
    @slow
    def UpperCAmelCase__ ( self : Optional[int] ) -> str:
        '''Check the pretrained question encoder against recorded activations.

        NOTE(review): the method name was mangled by tooling; unittest only
        discovers ``test_*`` methods, so the original name (presumably
        something like ``test_inference_no_head``) should be restored.
        '''
        UpperCAmelCase_ : Any = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
        UpperCAmelCase_ : Optional[int] = tf.constant(
            [[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]] )  # [CLS] hello, is my dog cute? [SEP]
        UpperCAmelCase_ : List[Any] = model(__magic_name__ )[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        UpperCAmelCase_ : List[str] = tf.constant(
            [
                [
                    0.0_3_2_3_6_2_5_3,
                    0.1_2_7_5_3_3_3_5,
                    0.1_6_8_1_8_5_0_9,
                    0.0_0_2_7_9_7_8_6,
                    0.3_8_9_6_9_3_3,
                    0.2_4_2_6_4_9_4_5,
                    0.2_1_7_8_9_7_1,
                    -0.0_2_3_3_5_2_2_7,
                    -0.0_8_4_8_1_9_5_9,
                    -0.1_4_3_2_4_1_1_7,
                ]
            ] )
        # Loose float tolerance: values were recorded from a reference run.
        self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 125 | 1 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """Write a small well-formed CSV file and return its path.

    Name restored: all five fixtures in this module shared one mangled
    identifier (each shadowing the previous), and the mangled single-line
    string literal corrupted the CSV payload with literal backslash-n text.
    """
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """Write a CSV whose second data row has a trailing extra column, which
    makes pandas raise a tokenizing error when it is parsed."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """Write a single-column CSV whose one row is the path of ``image_file``.

    Parameter names restored: the mangled signature repeated one identifier
    (a SyntaxError) while the f-string body already referenced
    ``image_file`` -- pytest resolves that fixture by parameter name.
    """
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """Write a single ``label`` column CSV with values from {good, bad}."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """Write a single ``int_list`` column CSV of space-separated integers."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """A malformed row must raise and also log which file failed to parse.

    Parameter names restored: pytest resolves the fixtures/``caplog`` by
    parameter name, and the mangled signature repeated one identifier (a
    SyntaxError); the mangled ``pytest.raises(lowercase, ...)`` passed a file
    path where the expected exception type belongs.
    """
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    # The reader should have logged an ERROR naming the offending file.
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """An ``image`` column cast to the Image feature must become
    ``{"path": ..., "bytes": None}`` records with the Image arrow type."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        # First data row of the fixture CSV is the image path.
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """A ``label`` column cast to ClassLabel must be encoded as class ids."""
    with open(csv_file_with_label, encoding="utf-8") as f:
        # Skip the header; the remaining lines are the string labels.
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    # Fix: the mangled assertion called a nonexistent ``.straint`` on the
    # fixture path; each label string must be mapped through ``str2int``.
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A converter splitting whitespace-separated ints must yield list columns."""
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
# --- (removed dataset-extraction artifact)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_pegasus import PegasusTokenizer
else:
    # Slow tokenizer is unavailable without sentencepiece.
    PegasusTokenizer = None

logger = logging.get_logger(__name__)

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
    "tokenizer_file": {
        "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
class SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    """Fast (Rust-backed) Pegasus tokenizer.

    Builds the `<mask_1>`/`<mask_2>`/`<unk_2>`..`<unk_102>` special-token layout
    expected by Pegasus checkpoints and supports exporting the slow
    sentencepiece vocabulary.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        # Number of reserved special-token ids before regular vocabulary starts.
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f'''additional_special_tokens should be of type {type(list)}, but is'''
                    f''' {type(additional_special_tokens)}''' )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f'''<unk_{i}>''' for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f'''<unk_{i}>''' for i in range(2, self.offset)]
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        # Slow-tokenizer export is only possible when the sentencepiece model is present.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        """Return a 0/1 mask over `seq` marking special-token ids (excluding <unk>)."""
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f''' {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}''' )
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_a: List, token_ids_a_a: Optional[List] = None, already_has_special_tokens: bool = False
    ):
        """Return the special-token mask for one sequence or a sequence pair."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_a)
        elif token_ids_a_a is None:
            return self._special_token_mask(token_ids_a) + [1]
        else:
            return self._special_token_mask(token_ids_a + token_ids_a_a) + [1]

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Append the EOS token to a single sequence (or concatenated pair)."""
        if token_ids_a_a is None:
            return token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_a + token_ids_a_a + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece model into `save_directory`; returns the path tuple."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
# --- (removed dataset-extraction artifact)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class _a(PretrainedConfig):
    """Configuration for a GLPN (Global-Local Path Network) depth-estimation model.

    Stores the encoder-stage layout (blocks, depths, strides, attention heads)
    and decoder hyperparameters; all values default to the glpn-kitti layout.
    """

    model_type = 'glpn'

    def __init__(
        self,
        num_channels: int = 3,
        num_encoder_blocks: int = 4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1E-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class _a(ModelMixin, ConfigMixin):
    """Normalizes CLIP image embeddings with a learned per-dimension mean/std.

    `scale` standardizes embeddings; `unscale` inverts the transformation.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        # Learned statistics, shape (1, embedding_dim) so they broadcast over the batch.
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        """Move/cast the statistics; returns self for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Standardize embeddings: (x - mean) / std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert `scale`: x * std + mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _snake_case ( unittest.TestCase ):
    """Slow numerical-parity tests comparing the Flax UNet2DCondition model (bfloat16)
    against reference output slices recorded from the PyTorch float16 implementation."""

    def get_file_format(self, seed, shape):
        # Filename of the reference .npy fixture on the Hub for a given seed/shape.
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        """Load a fixed noise tensor for `seed`/`shape` in the requested precision."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        """Load the pretrained Flax UNet (bf16 revision when fp16 is requested)."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        """Load fixed text-encoder hidden states for `seed`/`shape`."""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
            [17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
            [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
            [3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
            # fmt: on
        ] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
            [17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
            [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
            [3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
            # fmt: on
        ] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
# --- (removed dataset-extraction artifact)
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): obfuscated module-level constant; its original name/purpose is not
# recoverable from this chunk (likely an env-style "true" flag) — confirm against upstream.
UpperCamelCase_ = '''true'''
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (model, ddp_model, dataloader) for a seeded regression setup.

    The plain model is moved to the accelerator device; the copy and the
    dataloader are wrapped by `accelerator.prepare`.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build a tokenized GLUE/MRPC validation DataLoader (batch size 16).

    `use_longest` switches padding from fixed max_length=128 to longest-in-batch.
    """
    tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""")
    dataset = load_dataset("""glue""", """mrpc""", split="""validation""")

    def tokenize_function(examples):
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Map on the main process first so the cache is shared across workers.
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], )
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({'ddp': ..., 'no': ...} model/dataloader/device triples, accelerator)."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        """hf-internal-testing/mrpc-bert-base-cased""", return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run inference over `dataloader` and return (logits, targets) gathered
    across processes and concatenated into single tensors."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that gather_for_metrics yields exactly `num_samples` predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F'''Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'''
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Verify distributed metric computation matches a single-process baseline."""
    metric = evaluate.load("""glue""", """mrpc""")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["""no"""]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["""labels"""])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["""ddp"""]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["""labels"""]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key] ), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def main():
    """Run the gather_for_metrics test matrix across split/dispatch settings."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("""**Testing gather_for_metrics**""")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test torch metrics**""")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("""**Test last batch is not dropped when perfectly divisible**""")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # Entry point for TPU xla_spawn; the worker index is unused.
    main()


if __name__ == "__main__":
    main()
# --- (removed dataset-extraction artifact)
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
# Canonical filenames for serialized model artifacts across frameworks.
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
ADAPTER_CONFIG_NAME = "adapter_config.json"
ADAPTER_WEIGHTS_NAME = "adapter_model.bin"
ADAPTER_SAFE_WEIGHTS_NAME = "adapter_model.safetensors"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE  # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
    [[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2  # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def A(min_version):
    """Raise ImportError if the installed transformers is older than `min_version`."""
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = F"This example requires a minimum version of {min_version},"
        error_message += F" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
# --- (removed dataset-extraction artifact)
"""simple docstring"""
def lowercase (equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve a 2x2 linear system by Cramer's rule.

    Each equation is [a, b, c] representing a*x + b*y = c.

    Returns:
        (x, y) solution; (0.0, 0.0) for the trivial consistent case.

    Raises:
        ValueError: for malformed input, an all-zero coefficient pair,
            no solution, or infinitely many solutions.
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
# --- (removed dataset-extraction artifact)
def _print_dist(dist, v):
    """Pretty-print the v x v distance matrix, showing INF for unreachable pairs."""
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths over a v x v adjacency matrix `graph`.

    Unconnected edges are float("inf"). Prints the result matrix and
    returns (dist, v).
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
if __name__ == "__main__":
    # Interactive driver: read vertex/edge counts, then one weighted edge per line.
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))
    graph = [[float('inf') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(model):
    """Return True if `model` is a `torch.compile` wrapper (`OptimizedModule`).

    Requires torch >= 2.0 with the `_dynamo` submodule present.
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(model, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Unwrap `model` from DDP/DataParallel/DeepSpeed/torch.compile containers.

    When `keep_fp32_wrapper` is False, also restores the original (pre-autocast)
    forward and undoes any transformer-engine conversion.
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        # Remember the compile wrapper so we can re-attach the unwrapped module.
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # Walk the decorator chain back to the original forward.
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Block until all processes in the distributed group reach this point."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save `obj` to `f` once per node: via xm.save on TPU, torch.save on local rank 0."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set environment variables (keys upper-cased) inside the context.

    The variables are removed again on exit.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    """Return a human-readable name for `obj`: qualname, name, or its class name."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        # Plain instances: fall back to their class for a readable name.
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` (in place) and return it.

    Nested dicts are merged key-by-key; scalar values from `source` overwrite.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port: int = None) -> bool:
    """Return True if something is listening on `port` on localhost.

    Defaults to 29500, the conventional torch distributed rendezvous port.
    """
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # connect_ex returns 0 only when the connection succeeds.
        return s.connect_ex(("localhost", port)) == 0
"""simple docstring"""
def solution(n=4_000_000):
    """Project Euler #2: return the sum of the even Fibonacci numbers that do
    not exceed ``n`` (default four million).

    Fixes vs. the original: the loop compared against an undefined name ``n``
    instead of the parameter, and the function returned ``sum(n)`` (a
    TypeError) instead of the sum of the collected even terms. The name
    ``solution`` is restored from the ``__main__`` call site below.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
    # Script entry point: print the Project Euler #2 result.
    print(f'''{solution() = }''')
| 153 |
"""simple docstring"""
class _lowerCamelCase:
    """Prefix-sum index over a sequence of numbers.

    Provides O(1) inclusive range-sum queries and an O(n) check for the
    existence of a contiguous subarray with a given sum.

    Fixes vs. the original: ``__init__`` stored its results in throwaway
    locals so ``self.prefix_sum`` was never created; the two methods shared
    the name ``snake_case_`` (the second silently shadowed the first) and the
    range method declared two parameters with the same name (a SyntaxError);
    the membership check added the target instead of the running prefix sum.
    """

    def __init__(self, array) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end) -> int:
        """Return the sum of ``array[start..end]`` (both ends inclusive).

        Assumes ``0 <= start <= end < len(array)``.
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum) -> bool:
        """Return True if some contiguous subarray sums to ``target_sum``."""
        sums = {0}
        for sum_item in self.prefix_sum:
            # prefix[j] - prefix[i] == target  <=>  subarray (i, j] sums to target.
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
    # Execute any doctests embedded in this module when run as a script.
    import doctest
    doctest.testmod()
| 153 | 1 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory() -> None:
    """Simulate a CUDA out-of-memory failure.

    Used by the tests below to exercise ``find_executable_batch_size``'s
    retry/halving logic without needing a GPU. The name is restored from the
    call sites (``raise_fake_out_of_memory()``) in the test class.
    """
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    """Tiny 3 -> 4 -> 5 network used by the CUDA memory-release test below.

    Fixes vs. the original: the layers were assigned to throwaway locals (the
    module had no parameters), both linear layers reused one attribute name,
    ``nn.BatchNormad`` does not exist (restored to ``nn.BatchNorm1d``), and the
    forward pass was not named ``forward`` so ``model(x)`` would fail. The
    class name is restored from the ``ModelForTest()`` call site below.
    """

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    """Exercises ``find_executable_batch_size`` (halving on simulated OOM) and
    ``release_memory``.

    Fixes vs. the original: every nested ``mock_training_loop_function`` had
    obfuscated parameter names (some duplicated — a SyntaxError) while the
    bodies referenced ``batch_size``/``arg1``; result tuples were unpacked
    into a single throwaway name while the assertions used ``bs``/``arg1``;
    all test methods shared one name so only the last was ever collected.
    """

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [1_2_8, 6_4, 3_2, 1_6, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [1_2_8, 6_4, 3_2, 1_6, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        # Starting at 0 leaves nothing to halve — must fail immediately.
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        # Always "OOM" — the search halves down to zero and gives up.
        @find_executable_batch_size(starting_batch_size=1_6)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        # Passing batch_size explicitly is a usage error the decorator reports.
        @find_executable_batch_size(starting_batch_size=1_2_8)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(1_2_8, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        # Non-OOM exceptions must propagate unchanged instead of triggering retries.
        @find_executable_batch_size(starting_batch_size=1_6)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
| 156 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
# Module logger (the name is an artifact of automated renaming; unused below).
__SCREAMING_SNAKE_CASE :List[str] = logging.get_logger(__name__)
# Tokenizer resource tables for the BERT checkpoints. The original assigned all
# four constants to the same obfuscated name (each shadowing the previous one)
# while the tokenizer class below references them as VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# PRETRAINED_INIT_CONFIGURATION — those names are restored here.

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Download URLs for each checkpoint's vocabulary and pre-built fast-tokenizer file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
        "bert-base-multilingual-uncased": (
            "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
        ),
        "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
        "bert-large-uncased-whole-word-masking": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
        ),
        "bert-large-cased-whole-word-masking": (
            "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
        ),
        "bert-large-uncased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
        ),
        "bert-large-cased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
        ),
        "bert-base-cased-finetuned-mrpc": (
            "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
        ),
        "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
        "bert-base-german-dbmdz-uncased": (
            "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
        ),
        "TurkuNLP/bert-base-finnish-cased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
        ),
        "TurkuNLP/bert-base-finnish-uncased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
        ),
        "wietsedv/bert-base-dutch-cased": (
            "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
        "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
        "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
        "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
        "bert-base-multilingual-uncased": (
            "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
        ),
        "bert-base-multilingual-cased": (
            "https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
        ),
        "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
        "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
        "bert-large-uncased-whole-word-masking": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
        ),
        "bert-large-cased-whole-word-masking": (
            "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
        ),
        "bert-large-uncased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
        ),
        "bert-large-cased-whole-word-masking-finetuned-squad": (
            "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
        ),
        "bert-base-cased-finetuned-mrpc": (
            "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
        ),
        "bert-base-german-dbmdz-cased": (
            "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
        ),
        "bert-base-german-dbmdz-uncased": (
            "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
        ),
        "TurkuNLP/bert-base-finnish-cased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
        ),
        "TurkuNLP/bert-base-finnish-uncased-v1": (
            "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
        ),
        "wietsedv/bert-base-dutch-cased": (
            "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length each checkpoint's position embeddings support.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}

# Per-checkpoint init kwargs (whether the tokenizer lower-cases its input).
PRETRAINED_INIT_CONFIGURATION = {
    "bert-base-uncased": {"do_lower_case": True},
    "bert-large-uncased": {"do_lower_case": True},
    "bert-base-cased": {"do_lower_case": False},
    "bert-large-cased": {"do_lower_case": False},
    "bert-base-multilingual-uncased": {"do_lower_case": True},
    "bert-base-multilingual-cased": {"do_lower_case": False},
    "bert-base-chinese": {"do_lower_case": False},
    "bert-base-german-cased": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking": {"do_lower_case": False},
    "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
    "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
    "bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
    "bert-base-german-dbmdz-cased": {"do_lower_case": False},
    "bert-base-german-dbmdz-uncased": {"do_lower_case": True},
    "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
    "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
    "wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class A_(PreTrainedTokenizerFast):
    r"""Fast BERT tokenizer backed by HuggingFace *tokenizers*.

    Fixes vs. the original: the base class was the undefined name
    ``lowerCAmelCase_`` (restored to ``PreTrainedTokenizerFast``, imported at
    the top of this file); all five framework class attributes shared one name
    (each shadowing the previous); ``__init__`` and two methods declared
    duplicate parameter names (SyntaxErrors); and all three methods were named
    ``lowercase`` so only the last was reachable — the framework-required
    override names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-configure the backend normalizer if the serialized state disagrees
        # with the options requested here.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence (plus specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Persist the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 156 | 1 |
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)  # used by RagRayDistributedRetriever.init_retrieval below
class lowerCamelCase_:
    """Ray actor wrapper holding one lazily-initialized ``RagRetriever`` replica.

    Fixes vs. the original: instance state was assigned to throwaway locals so
    ``self.initialized``/``self.retriever`` never existed, and the method names
    are restored from the ``worker.<name>.remote(...)`` call sites in the
    distributed retriever below.
    """

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        """Build the retriever once; later calls are no-ops."""
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        """Load the retrieval index inside this actor process."""
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Run retrieval for a batch of question encodings."""
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    """``RagRetriever`` that fans retrieval out to a pool of Ray actor workers.

    Fixes vs. the original: the base class was the undefined name ``__a``
    (restored to ``RagRetriever``, imported above); the class reused the
    worker class's name, shadowing it; several method signatures declared
    duplicate parameter names (SyntaxErrors); and locals/attributes were
    assigned to throwaway names while the bodies referenced the real ones.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                "When using Ray for distributed fine-tuning, "
                "you'll need to provide the paths instead, "
                "as the dataset and the index are loaded "
                "separately. More info in examples/rag/use_own_knowledge_dataset.py "
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Hand each worker what it needs to build its own retriever replica.
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index
                    )
                    for worker in self.retrieval_workers
                ]
            )

    def init_retrieval(self):
        """Initialize the index on every worker, or locally when no workers exist."""
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        """Retrieve documents, delegating to a randomly chosen worker when available."""
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(
                random_worker.retrieve.remote(question_hidden_states, n_docs)
            )
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(
            retriever_name_or_path, indexed_dataset, **kwargs
        )

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        """Build a distributed retriever (and its tokenizers/index) from a checkpoint."""
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 181 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import scaffolding for the RemBERT subpackage. Fixes vs. the original:
# the structure dict and each optional-dependency entry were all assigned to
# one module-level name (each overwriting the last), and the final line bound
# the _LazyModule to a throwaway name instead of installing it into
# sys.modules — evidence: `_import_structure` is already passed to _LazyModule
# below, and `import sys` exists solely for that assignment.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181 | 1 |
'''simple docstring'''
# Per-month doomsday anchor dates (day-of-month mod 7) for leap / common years,
# and the weekday names indexed by the algorithm's result. The original bound
# all three tables to one shadowed name while the function referenced them by
# the names restored below; the function also declared three parameters with
# the same name (a SyntaxError) and inverted the century leap-year test.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def UpperCamelCase(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date via Conway's doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    # A year is NOT leap when it isn't divisible by 4, or when it is a century
    # year (centurian == 0) not divisible by 400 (original tested `== 0`).
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
    # Execute any doctests embedded in this module when run as a script.
    import doctest
    doctest.testmod()
| 123 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    """Builds a tiny ``TimesformerConfig`` plus matching dummy inputs for the
    model tests below.

    Fixes vs. the original: every ``__init__`` parameter shared one name (a
    SyntaxError), attributes were assigned to throwaway locals, and the method
    names are restored from their call sites (e.g. ``self.get_config()``,
    ``self.model_tester.prepare_config_and_inputs()``). The class name is
    restored from the ``TimesformerModelTester(self)`` call site in ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = num_frames * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for TimeSformer.

    Fixes vs. the original: the mixin base classes were the undefined name
    ``UpperCamelCase__``; the class attributes all shared one name (shadowing
    each other); all test methods shared one name so only the last was
    collected; and numerous locals were assigned to throwaway names while the
    bodies referenced the intended ones.
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size]
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    """Download the test spaghetti-eating clip from the Hub and return it as a
    list of frames (numpy arrays).

    The name is restored from the ``prepare_video()`` call site in the
    integration test; the original loaded from an undefined name instead of
    the downloaded path.
    """
    file_path = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file_path)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the released K400-finetuned checkpoint.

    Fixes vs. the original: the cached property is restored to
    ``default_image_processor`` (its call site below uses that name) and the
    locals were assigned to a throwaway name while the body referenced the
    intended ones.
    """

    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained(
            "facebook/timesformer-base-finetuned-k400"
        ).to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 4_00))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 123 | 1 |
def solution ( n = 1_00 ) -> int:
    """Project Euler 29: count distinct values of a**b for 2 <= a, b <= n.

    Renamed from the mangled ``snake_case_``: the ``__main__`` guard below
    calls ``solution``.  The mangled body also read the undefined names
    ``n``/``A__``/``collect_powers``.

    Args:
        n: inclusive upper bound for both the base and the exponent.

    Returns:
        int: the number of distinct powers.
    """
    collect_powers = set()
    maximum_limit = n + 1  # range() end is exclusive
    for a in range(2 , maximum_limit ):
        for b in range(2 , maximum_limit ):
            # a set keeps only distinct results, so e.g. 2**4 == 4**2 collapses
            collect_powers.add(a**b )
    return len(collect_powers )


if __name__ == "__main__":
    print('''Number of terms ''', solution(int(str(input()).strip())))
| 196 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __snake_case ( TokenizerTesterMixin ,unittest.TestCase):
    """Tokenization tests for ``BioGptTokenizer`` using a tiny handcrafted BPE vocab.

    The mangled original gave all four methods the same name (so only the last
    survived), bound every local to one identifier, and inherited from the
    undefined ``_SCREAMING_SNAKE_CASE``; canonical names restored.
    """

    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Write a toy vocab/merges pair into the temp dir the mixin provides."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """w</w>""",
            """r</w>""",
            """t</w>""",
            """lo""",
            """low""",
            """er</w>""",
            """low</w>""",
            """lowest</w>""",
            """newer</w>""",
            """wider</w>""",
            """<unk>""",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(merges ) )

    def get_input_output_texts(self , tokenizer):
        """Return an (input, expected output) text pair for round-trip checks."""
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE-tokenize a word and map the tokens (plus <unk>) to ids."""
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )

    @slow
    def test_sequence_builders(self):
        """``build_inputs_with_special_tokens`` prepends id 2 for single and pair inputs."""
        tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        self.assertTrue(encoded_sentence == [2] + text )
        self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 120 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
#
# The mangled original bound this list to ``snake_case`` but appended to and
# iterated the undefined ``pkgs_to_check_at_runtime``; name restored.
pkgs_to_check_at_runtime = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()

# Backport packages only required on old interpreters.
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append('''importlib_metadata''')

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def __lowercase ( pkg , hint=None ):
    """Check that the installed version of *pkg* satisfies the pin in ``deps``.

    The mangled signature declared two parameters with the same name (a
    SyntaxError) and the body read the undefined ``pkg``; parameter names
    restored from that usage.

    Args:
        pkg: key into the ``deps`` table (package name).
        hint: optional extra message forwarded to ``require_version`` on failure.
    """
    require_version(deps[pkg] , hint )
| 363 |
from math import ceil, sqrt
def solution ( limit : int = 1_0_0_0_0_0_0 ) -> int:
    """Project Euler 173: count square laminae that use at most `limit` tiles.

    A lamina is a square outline: an ``outer_width``² square with a centred
    square hole, using outer² - hole² tiles (outer and hole share parity).
    Renamed from the mangled ``__lowercase``: the ``__main__`` guard below
    calls ``solution()``; the body also read ``limit``/``answer``/
    ``hole_width_lower_bound`` which were never bound.

    Args:
        limit: maximum number of tiles available.

    Returns:
        int: how many distinct laminae can be formed.
    """
    answer = 0
    # The thinnest lamina of width w uses 4*(w - 1) tiles, so w <= limit//4 + 1.
    for outer_width in range(3 , (limit // 4) + 2 ):
        if outer_width**2 > limit:
            # Hole must be large enough that outer² - hole² <= limit.
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
        else:
            hole_width_lower_bound = 1
        # Hole and outer widths need the same parity for the ring to be centred.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        # Valid hole widths run from the bound up to outer_width - 2 in steps of 2.
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 109 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# The mangled original re-bound every constant below to ``A__``, leaving the
# names the tokenizer class actually reads (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, MAX_MODEL_INPUT_SIZES, MUSTC_LANGS, LANGUAGES)
# undefined; the canonical names are restored.
logger = logging.get_logger(__name__)

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''spm_file''': '''sentencepiece.bpe.model''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
        ),
    },
    '''spm_file''': {
        '''facebook/s2t-small-librispeech-asr''': (
            '''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    '''facebook/s2t-small-librispeech-asr''': 10_24,
}

# Languages covered by the MuST-C multilingual checkpoints.
MUSTC_LANGS = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']

LANGUAGES = {'''mustc''': MUSTC_LANGS}
class UpperCAmelCase ( snake_case_ ):
_lowercase: Union[str, Any] = VOCAB_FILES_NAMES
_lowercase: str = PRETRAINED_VOCAB_FILES_MAP
_lowercase: str = MAX_MODEL_INPUT_SIZES
_lowercase: int = ['''input_ids''', '''attention_mask''']
_lowercase: List[int] = []
def __init__( self : List[Any] , __snake_case : int , __snake_case : int , __snake_case : Union[str, Any]="<s>" , __snake_case : Any="</s>" , __snake_case : List[str]="<pad>" , __snake_case : Optional[int]="<unk>" , __snake_case : Optional[Any]=False , __snake_case : Any=False , __snake_case : Optional[Any]=None , __snake_case : List[str]=None , __snake_case : Optional[Dict[str, Any]] = None , **__snake_case : List[str] , ) -> None:
_lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__snake_case , eos_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , do_upper_case=__snake_case , do_lower_case=__snake_case , tgt_lang=__snake_case , lang_codes=__snake_case , sp_model_kwargs=self.sp_model_kwargs , **__snake_case , )
_lowerCAmelCase = do_upper_case
_lowerCAmelCase = do_lower_case
_lowerCAmelCase = load_json(__snake_case )
_lowerCAmelCase = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase = spm_file
_lowerCAmelCase = load_spm(__snake_case , self.sp_model_kwargs )
if lang_codes is not None:
_lowerCAmelCase = lang_codes
_lowerCAmelCase = LANGUAGES[lang_codes]
_lowerCAmelCase = [f"<lang:{lang}>" for lang in self.langs]
_lowerCAmelCase = {lang: self.sp_model.PieceToId(f"<lang:{lang}>" ) for lang in self.langs}
_lowerCAmelCase = self.lang_tokens
_lowerCAmelCase = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
_lowerCAmelCase = {}
@property
def lowercase__ ( self : str ) -> int:
return len(self.encoder )
@property
def lowercase__ ( self : Optional[int] ) -> str:
return self._tgt_lang
@tgt_lang.setter
def lowercase__ ( self : Tuple , __snake_case : int ) -> None:
_lowerCAmelCase = new_tgt_lang
self.set_tgt_lang_special_tokens(__snake_case )
def lowercase__ ( self : Optional[Any] , __snake_case : str ) -> None:
_lowerCAmelCase = self.lang_code_to_id[tgt_lang]
_lowerCAmelCase = [lang_code_id]
def lowercase__ ( self : Optional[Any] , __snake_case : str ) -> List[str]:
return self.sp_model.encode(__snake_case , out_type=__snake_case )
def lowercase__ ( self : Any , __snake_case : int ) -> List[str]:
return self.encoder.get(__snake_case , self.encoder[self.unk_token] )
def lowercase__ ( self : Union[str, Any] , __snake_case : int ) -> str:
return self.decoder.get(__snake_case , self.unk_token )
def lowercase__ ( self : Any , __snake_case : List[str] ) -> str:
_lowerCAmelCase = []
_lowerCAmelCase = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
_lowerCAmelCase = self.sp_model.decode(__snake_case )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
_lowerCAmelCase = []
else:
current_sub_tokens.append(__snake_case )
_lowerCAmelCase = self.sp_model.decode(__snake_case )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase__ ( self : str , __snake_case : Optional[int] , __snake_case : Any=None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase__ ( self : Any , __snake_case : List[int] , __snake_case : Optional[List[int]] = None , __snake_case : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__snake_case , token_ids_a=__snake_case , already_has_special_tokens=__snake_case )
_lowerCAmelCase = [1] * len(self.prefix_tokens )
_lowerCAmelCase = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(__snake_case )) + suffix_ones
return prefix_ones + ([0] * len(__snake_case )) + ([0] * len(__snake_case )) + suffix_ones
def lowercase__ ( self : Optional[Any] ) -> Dict:
_lowerCAmelCase = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Dict:
_lowerCAmelCase = self.__dict__.copy()
_lowerCAmelCase = None
return state
def __setstate__( self : Tuple , __snake_case : Dict ) -> None:
_lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase = {}
_lowerCAmelCase = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase__ ( self : int , __snake_case : str , __snake_case : Optional[str] = None ) -> Tuple[str]:
_lowerCAmelCase = Path(__snake_case )
assert save_dir.is_dir(), f"{save_directory} should be a directory"
_lowerCAmelCase = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , __snake_case )
if os.path.abspath(self.spm_file ) != os.path.abspath(__snake_case ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __snake_case )
elif not os.path.isfile(self.spm_file ):
with open(__snake_case , """wb""" ) as fi:
_lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__snake_case )
return (str(__snake_case ), str(__snake_case ))
def load_spm( path , sp_model_kwargs ):
    """Load a SentencePiece model from `path` using the given constructor kwargs.

    Renamed from the mangled ``UpperCamelCase__`` (three helpers shared that
    name): the tokenizer class calls ``load_spm``.  The mangled signature also
    declared two parameters with the same name (a SyntaxError).
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json( path ):
    """Read and deserialize a JSON file.

    Renamed from the mangled ``UpperCamelCase__``: the tokenizer class calls
    ``load_json``.  The mangled body also passed the *path* to ``json.load``,
    which needs the open file object.
    """
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json( data , path ):
    """Serialize `data` to `path` as pretty-printed (indent=2) JSON.

    Renamed from the mangled ``UpperCamelCase__``: the tokenizer class calls
    ``save_json``.  The mangled signature declared two parameters with the
    same name (a SyntaxError).
    """
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
| 70 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)

# The mangled source re-bound the logger name to this string, clobbering the
# logger; give the docstring alias its own (canonical) name instead.
_CONFIG_FOR_DOC = '''T5Config'''
def lowercase_ ( input_ids , pad_token_id , decoder_start_token_id ) -> jnp.ndarray:
    '''Shift input ids one token to the right (teacher forcing for decoders).

    The mangled signature declared three parameters with the same name (a
    SyntaxError); names restored from how each positional is used below.

    Args:
        input_ids: integer array of shape (batch, seq_len); may contain -100
            sentinels for label positions that were masked out.
        pad_token_id: id substituted for every -100 sentinel.
        decoder_start_token_id: id placed at position 0 of every sequence.

    Returns:
        jnp.ndarray: the shifted ids, same shape as `input_ids`.
    '''
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    # Labels use -100 for ignored positions; decoder inputs must use the pad id.
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class __lowercase ( FlaxTaModel ):
    """mT5 variant of the Flax T5 model: same architecture, mT5 config/type.

    The mangled source lost the base class (``_lowercase`` is undefined) and
    collapsed both class attributes onto one name, dropping ``config_class``.
    """
    # NOTE(review): base restored as FlaxTaModel per the canonical flax-mt5
    # ordering (Model, ForConditionalGeneration, EncoderModel) — confirm.
    model_type = "mt5"
    config_class = MTaConfig
class __lowercase ( FlaxTaForConditionalGeneration ):
    """mT5 variant of the Flax T5 conditional-generation model.

    The mangled source lost the base class (``_lowercase`` is undefined) and
    collapsed both class attributes onto one name, dropping ``config_class``.
    """
    # NOTE(review): base restored as FlaxTaForConditionalGeneration per the
    # canonical flax-mt5 ordering — confirm.
    model_type = "mt5"
    config_class = MTaConfig
class __lowercase ( FlaxTaEncoderModel ):
    """mT5 variant of the Flax T5 encoder-only model.

    The mangled source lost the base class (``_lowercase`` is undefined) and
    collapsed both class attributes onto one name, dropping ``config_class``.
    """
    # NOTE(review): base restored as FlaxTaEncoderModel per the canonical
    # flax-mt5 ordering — confirm.
    model_type = "mt5"
    config_class = MTaConfig
| 318 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds a tiny BlipText config plus random inputs for the TF model tests.

    Renamed from the mangled ``__snake_case``: the test class below
    instantiates ``BlipTextModelTester(self)``.  The mangled ``__init__``
    also declared every parameter with the same name (a SyntaxError) and all
    four methods shared one name; names restored from the assignments/callers.
    """

    def __init__(
        self,
        parent,
        batch_size=1_2,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        projection_dim=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=5_1_2,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask) with contiguous-prefix masks."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            # Rewrite each row as a block of 1s followed by 0s.
            for batch_idx, start_index in enumerate(rnd_start_indices ):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0
        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )

    def get_config(self):
        """Tiny BlipTextConfig mirroring the tester hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Instantiate TFBlipTextModel and check output shapes."""
        model = TFBlipTextModel(config=config )
        result = model(input_ids , attention_mask=input_mask , training=False )
        result = model(input_ids , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() to the common-test dict format."""
        config, input_ids, input_mask = self.prepare_config_and_inputs()
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class __snake_case ( TFModelTesterMixin , unittest.TestCase):
    """TF BlipText model tests.

    The mangled original inherited from the undefined ``_UpperCAmelCase``,
    gave every method one shared name, and dropped the ``self.model_tester``
    assignment; canonical names restored.
    """

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # NOTE(review): these three booleans were all mangled to the same name;
    # restored to the mixin switches they most plausibly were — confirm.
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BlipTextConfig , hidden_size=3_7 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_training(self):
        # Training is intentionally not exercised for this model.
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason='''Blip does not use inputs_embeds''' )
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    def test_pt_tf_model_equivalence(self , allow_missing_keys=True ):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys )
| 352 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __snake_case ( ChunkPipeline):
    """Zero-shot object detection pipeline: score free-text candidate labels
    against an image and return bounding boxes.

    The mangled original referenced the undefined names
    ``postprocess_params``, ``label``, ``score``, ``xmin``… and the bogus
    dtype ``torch.intaa``; coherent locals and canonical method names are
    restored from the surviving references.
    """

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )

    def __call__( self , image: Union[str, "Image.Image", List[Dict[str, Any]]] , candidate_labels: Union[str, List[str]] = None , **kwargs , ):
        """Accept either a single image + labels, or a list of pre-built dicts."""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('''text_queries''' )
        if isinstance(image , (str, Image.Image) ):
            inputs = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results

    def _sanitize_parameters( self , **kwargs ):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['''threshold'''] = kwargs['''threshold''']
        if "top_k" in kwargs:
            postprocess_params['''top_k'''] = kwargs['''top_k''']
        return {}, {}, postprocess_params

    def preprocess( self , inputs ):
        """Yield one model input per candidate label (chunk pipeline protocol)."""
        image = load_image(inputs['''image'''] )
        candidate_labels = inputs['''candidate_labels''']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(''',''' )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward( self , model_inputs ):
        # Pop bookkeeping keys so only tensors reach the model.
        target_size = model_inputs.pop('''target_size''' )
        candidate_label = model_inputs.pop('''candidate_label''' )
        is_last = model_inputs.pop('''is_last''' )
        outputs = self.model(**model_inputs )
        model_outputs = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs

    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
        """Convert per-label model outputs into sorted {score, label, box} dicts."""
        results = []
        for model_output in model_outputs:
            label = model_output['''candidate_label''']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['''target_size'''] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['''scores'''][index].item()
                box = self._get_bounding_box(outputs['''boxes'''][index][0] )
                result = {'''score''': score, '''label''': label, '''box''': box}
                results.append(result )
        results = sorted(results , key=lambda x : x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box( self , box: "torch.Tensor" ):
        """Turn an (xmin, ymin, xmax, ymax) tensor into a dict of ints."""
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
| 175 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class FlaxBeitModelTester(unittest.TestCase ):
    """Builds a tiny BEiT config plus random inputs for the Flax model tests.

    Renamed from the mangled ``SCREAMING_SNAKE_CASE``: the test class below
    instantiates ``FlaxBeitModelTester(self)``.  The mangled ``__init__``
    also declared every parameter with one shared name (a SyntaxError) and
    dropped every ``self.`` prefix; names restored from the assignments.
    """

    def __init__(
        self,
        parent,
        vocab_size=1_0_0,
        batch_size=1_3,
        image_size=3_0,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        num_labels=3,
    ):
        self.parent = parent
        self.vocab_size = vocab_size
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = BeitConfig(
            vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
        return config, pixel_values, labels

    def create_and_check_model(self , config , pixel_values , labels ):
        model = FlaxBeitModel(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm(self , config , pixel_values , labels ):
        model = FlaxBeitForMaskedImageModeling(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )

    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        # NOTE(review): the two bare assignments here were mangled; restored
        # as the conventional config tweaks (num_labels / num_channels) — confirm.
        config.num_labels = self.type_sequence_label_size
        model = FlaxBeitForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = FlaxBeitForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE ( FlaxModelTesterMixin , unittest.TestCase ):
    """Flax BEiT model tests.

    The mangled original inherited from the undefined ``a_``, gave every
    method one shared name, and dropped the ``self.model_tester`` assignment;
    canonical names restored (method names hedged where not referenced).
    """

    all_model_classes = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )

    def setUp(self):
        self.model_tester = FlaxBeitModelTester(self )
        self.config_tester = ConfigTester(self , config_class=BeitConfig , has_text_modality=False , hidden_size=3_7 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest('''JIT Enabled''' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
            outputs = model(np.ones((1, 3, 2_2_4, 2_2_4) ) )
            self.assertIsNotNone(outputs )
def prepare_img():
    """Load the standard COCO test image used by the integration tests.

    Renamed from the mangled ``__SCREAMING_SNAKE_CASE``: the integration tests
    below call ``prepare_img()``; the mangled body also returned the undefined
    name ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Slow integration tests for the pretrained Flax BEiT checkpoints.

    The mangled original bound every local to one identifier and mangled the
    cached property the tests read as ``self.default_image_processor``;
    coherent names restored.
    """

    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='''np''' ).pixel_values
        # prepare bool_masked_pos (mask every one of the 196 patches)
        bool_masked_pos = np.ones((1, 1_9_6) , dtype=bool )
        # forward pass
        outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_9_6, 8_1_9_2)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1E-2 ) )

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 1_0_0_0)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([-1.2385, -1.0987, -1.0108] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 2_8_1
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='''np''' )
        # forward pass
        outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = (1, 2_1_8_4_1)
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = np.array([1.6881, -0.2787, 0.5901] )
        self.assertTrue(np.allclose(logits[0, :3] , expected_slice , atol=1E-4 ) )
        expected_class_idx = 2_3_9_6
        self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def __lowercase ( ) -> None:
    """Entry point: parse TensorFlow benchmark CLI args and run the benchmark.

    Argparse failures caused by the removed ``--no_*`` flag spelling are
    re-raised with a message pointing at the new ``--no-*`` spelling.
    """
    # Fix: every local here was bound to a mangled name while subsequent lines
    # read the real identifiers (parser, benchmark, full_error_msg, ...).
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
        begin_error_msg = ' '.join(str(e ).split(' ' )[:-1] )
        full_error_msg = ''
        # NOTE(review): eval() of the argparse error tail mirrors the upstream
        # benchmark script and only sees locally produced text, but confirm it
        # can never be fed attacker-controlled argv in your deployment.
        depreciated_args = eval(str(e ).split(' ' )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()


if __name__ == "__main__":
    # Fix: the guard previously called an undefined `main()`.
    __lowercase()
| 337 | 0 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class _SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any]=sys.maxsize ):
UpperCamelCase :List[str] = '''bilinear'''
UpperCamelCase :List[str] = max_size
UpperCamelCase :List[Any] = short_edge_length
def __call__( self : List[str] , __lowerCamelCase : List[str] ):
UpperCamelCase :int = []
for img in imgs:
UpperCamelCase :Dict = img.shape[:2]
# later: provide list and randomly choose index for resize
UpperCamelCase :str = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
UpperCamelCase :List[Any] = size * 1.0 / min(snake_case_ , snake_case_ )
if h < w:
UpperCamelCase :Dict = size, scale * w
else:
UpperCamelCase :Optional[Any] = scale * h, size
if max(snake_case_ , snake_case_ ) > self.max_size:
UpperCamelCase :Tuple = self.max_size * 1.0 / max(snake_case_ , snake_case_ )
UpperCamelCase :List[Any] = newh * scale
UpperCamelCase :int = neww * scale
UpperCamelCase :Dict = int(neww + 0.5 )
UpperCamelCase :Dict = int(newh + 0.5 )
if img.dtype == np.uinta:
UpperCamelCase :Optional[Any] = Image.fromarray(snake_case_ )
UpperCamelCase :str = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
UpperCamelCase :Optional[Any] = np.asarray(snake_case_ )
else:
UpperCamelCase :Tuple = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
UpperCamelCase :Any = nn.functional.interpolate(
snake_case_ , (newh, neww) , mode=self.interp_method , align_corners=snake_case_ ).squeeze(0 )
img_augs.append(snake_case_ )
return img_augs
class _SCREAMING_SNAKE_CASE :
    """Batch preprocessing for the FRCNN-style pipeline: tensorize, resize,
    normalize and pad a list of images, returning the padded batch, the
    per-image sizes and the (y, x) scale factors back to the raw resolution.

    Fixes over the previous revision: ``__call__`` had two parameters sharing
    one mangled name (SyntaxError), locals were bound to mangled names while
    reads used the real identifiers, and the padding helper was named ``_A``
    although ``__call__`` invokes ``self.pad``.
    """

    def __init__( self , cfg ):
        # NOTE(review): `ResizeShortestEdge` must resolve to the resize helper
        # defined above; that class appears to have lost this name in the
        # refactor — confirm the module-level binding.
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad( self , images ):
        """Right/bottom-pad every image to the max H/W in the batch.

        Returns the stacked batch and a tensor of the pre-pad (h, w) sizes.
        """
        max_size = tuple(max(s ) for s in zip(*[img.shape for img in images] ) )
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
            for size, im in zip(image_sizes , images )
        ]
        return torch.stack(images ), torch.tensor(image_sizes )

    def __call__( self , images , single_image=False ):
        with torch.no_grad():
            if not isinstance(images , list ):
                images = [images]
            if single_image:
                assert len(images ) == 1
            for i in range(len(images ) ):
                if isinstance(images[i] , torch.Tensor ):
                    images.insert(i , images.pop(i ).to(self.device ).float() )
                elif not isinstance(images[i] , torch.Tensor ):
                    # Non-tensor inputs (paths / arrays) are tensorized first.
                    images.insert(
                        i , torch.as_tensor(img_tensorize(images.pop(i ) , input_format=self.input_format ) )
                        .to(self.device )
                        .float() , )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images] )
            images = self.aug(images )
            # now normalize before pad to avoid useless arithmetic on pad pixels
            images = [self.normalizer(x ) for x in images]
            # now pad them to a uniform batch shape
            images, sizes = self.pad(images )
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # scale factors mapping resized coordinates back to the raw image
            scales_yx = torch.true_divide(raw_sizes , sizes )
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def SCREAMING_SNAKE_CASE_( boxes , scale_yx ):
    """Rescale xyxy ``boxes`` in place by per-image (y, x) scale factors.

    Fix: the two parameters previously shared one mangled name (SyntaxError)
    while the body read ``boxes`` / ``scale_yx``.
    """
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes
def SCREAMING_SNAKE_CASE_( tensor , box_size ):
    """Clamp xyxy ``tensor`` boxes in place to an ``(h, w)`` image size.

    Fix: the two parameters previously shared one mangled name (SyntaxError)
    and the clamp bounds read undefined identifiers; x coordinates are clamped
    to the width and y coordinates to the height.
    """
    assert torch.isfinite(tensor ).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0 , max=w )
    tensor[:, 1].clamp_(min=0 , max=h )
    tensor[:, 2].clamp_(min=0 , max=w )
    tensor[:, 3].clamp_(min=0 , max=h )
| 351 |
from __future__ import annotations
from PIL import Image
# Define glider example
# Define glider example.
# Fix: both example grids were previously assigned to the same mangled name,
# so the glider was overwritten and the `__main__` read of GLIDER failed.
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def SCREAMING_SNAKE_CASE_( cells: list[list[int]] ) -> list[list[int]]:
    """Compute one Conway's Game of Life step for a 2-D grid of 0/1 cells.

    Fix: locals were bound to mangled names while reads used the real
    identifiers, and the row accumulator append read an undefined name.
    """
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
def SCREAMING_SNAKE_CASE_( cells: list[list[int]] , frames: int ) -> list[Image.Image]:
    """Render ``frames`` successive Game of Life generations as PIL images.

    Fix: the two parameters previously shared one mangled name (SyntaxError)
    and every local was bound to a mangled name while reads used the real
    identifiers.
    """
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('RGB' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image (live cell -> black, dead cell -> white)
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        # NOTE(review): `new_generation` should be the single-step function
        # defined above; its binding looks mangled there — confirm the
        # module-level name lines up.
        cells = new_generation(cells )
    return images


if __name__ == "__main__":
    images = SCREAMING_SNAKE_CASE_(GLIDER, 16 )
    images[0].save('out.gif' , save_all=True , append_images=images[1:] )
| 62 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class _a ( datasets.BeamBasedBuilder):
    """Minimal Beam builder producing three flat string examples.

    Fixes over the previous revision: all three hooks shared one mangled
    method name (so two were shadowed), two parameters shared one name
    (SyntaxError), and ``supervised_keys``/``beam.Create`` read undefined
    identifiers. Hook names follow the BeamBasedBuilder contract.
    """

    def _info( self ):
        # Builder hook: dataset metadata. No supervised keys for this dataset.
        return datasets.DatasetInfo(
            features=datasets.Features({'content': datasets.Value('string' )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        # Builder hook: a single train split fed by the dummy examples.
        # NOTE(review): `get_test_dummy_examples` must resolve to the helper
        # defined below; its binding looks mangled — confirm.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_dummy_examples()} )]

    def _build_pcollection( self , pipeline , examples ):
        # Builder hook: lift the in-memory examples into a Beam PCollection.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
class _a ( datasets.BeamBasedBuilder):
    """Minimal Beam builder producing three nested examples.

    Fixes mirror the flat builder above: shadowed hook names, duplicated
    parameter names (SyntaxError) and undefined identifier reads.
    """

    def _info( self ):
        # Builder hook: nested feature schema, no supervised keys.
        return datasets.DatasetInfo(
            features=datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) , supervised_keys=None , )

    def _split_generators( self , dl_manager , pipeline ):
        # NOTE(review): `get_test_nested_examples` must resolve to the helper
        # defined below; its binding looks mangled — confirm.
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'examples': get_test_nested_examples()} )
        ]

    def _build_pcollection( self , pipeline , examples ):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples )
def lowerCamelCase_ ( ):
    """Build the (index, example) pairs used by the flat dummy dataset."""
    payloads = ["foo", "bar", "foobar"]
    pairs = []
    for idx, text in enumerate(payloads ):
        pairs.append((idx, {"content": text}) )
    return pairs
def lowerCamelCase_ ( ):
    """Build the (index, example) pairs used by the nested dummy dataset."""
    entries = []
    for idx, text in enumerate(["foo", "bar", "foobar"] ):
        entries.append((idx, {"a": {"b": [text]}}) )
    return entries
class _a ( TestCase):
    """End-to-end tests for the Beam-based builders on the DirectRunner.

    Fixes over the previous revision: the base class was the undefined
    `_lowercase` (the imported ``TestCase`` was never used), all four test
    methods shared one mangled name (only the last survived), every local was
    bound to a mangled name while reads used the real identifiers, and the
    sharded test asserted shard 00000 twice instead of checking both shards.

    NOTE(review): `DummyBeamDataset`, `NestedBeamDataset`,
    `get_test_dummy_examples` and `get_test_nested_examples` refer to the
    builders/helpers defined above, whose module-level bindings look mangled
    in this revision — confirm the names line up.
    """

    @require_beam
    def test_download_and_prepare( self ):
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , F'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , expected_num_examples )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , expected_num_examples )
            self.assertDictEqual(dset['train'][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset

    @require_beam
    def test_download_and_prepare_sharded( self ):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
            with patch('apache_beam.io.parquetio.WriteToParquet' ) as write_parquet_mock:
                # Force two shards so the sharded file-naming path is exercised.
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            tmp_cache_dir , builder.name , 'default' , '0.0.0' , F'{builder.name}-train-00000-of-00002.arrow' ) ) )
                self.assertTrue(
                    os.path.exists(
                        os.path.join(
                            tmp_cache_dir , builder.name , 'default' , '0.0.0' , F'{builder.name}-train-00001-of-00002.arrow' ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'content': datasets.Value('string' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , expected_num_examples )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['train']['content'] ) , sorted(['foo', 'bar', 'foobar'] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset

    @require_beam
    def test_no_beam_options( self ):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            # Without a beam_runner the builder must refuse to prepare.
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def test_nested_features( self ):
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner='DirectRunner' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , F'{builder.name}-train.arrow' ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'a': datasets.Sequence({'b': datasets.Value('string' )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['train'].num_rows , expected_num_examples )
            self.assertEqual(dset['train'].info.splits['train'].num_examples , expected_num_examples )
            self.assertDictEqual(dset['train'][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['train'][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , 'default' , '0.0.0' , 'dataset_info.json' ) ) )
            del dset
| 131 |
def remove_duplicates( key: str ) -> str:
    """Return *key* with repeated letters dropped (first occurrence wins).

    Spaces are always kept; non-alphabetic characters are discarded.
    """
    key_no_dups = ''
    for ch in key:
        if ch == ' ' or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map( key: str ) -> dict[str, str]:
    """Build the keyword-cipher substitution map for *key*."""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher( message: str , cipher_map: dict[str, str] ) -> str:
    """Encode the upper-cased *message* with *cipher_map*; unmapped characters
    (digits, punctuation, spaces) pass through unchanged."""
    return ''.join(cipher_map.get(ch , ch ) for ch in message.upper() )


def decipher( message: str , cipher_map: dict[str, str] ) -> str:
    """Decode a message produced by :func:`encipher` with the same map."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return ''.join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )


def main() -> None:
    """Interactive entry point: prompt for a message, keyword and mode."""
    message = input('Enter message to encode or decode: ' ).strip()
    key = input('Enter keyword: ' ).strip()
    option = input('Encipher or decipher? E/D:' ).strip()[0].lower()
    try:
        func = {'e': encipher, 'd': decipher}[option]
    except KeyError:
        raise KeyError('invalid input option' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 131 | 1 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __magic_name__ :
    """Mixin with shared dummy-component builders and save/load checks for
    DeepFloyd-IF pipeline test classes. Subclasses provide ``pipeline_class``
    and ``get_dummy_inputs``.

    Fixes over the previous revision: all four methods shared one mangled name
    (only the last survived), every local was bound to a mangled name while
    reads used the real identifiers, and several keyword arguments read the
    undefined `_lowercase` placeholder.
    """

    def _get_dummy_components( self ):
        """Seeded, tiny components for a base IF pipeline."""
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        # NOTE(review): thresholding=True restored from the upstream IF test
        # mixin — the placeholder value here was undefined; confirm.
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _get_superresolution_dummy_components( self ):
        """Seeded, tiny components for a super-resolution IF pipeline."""
        torch.manual_seed(0 )
        text_encoder = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        unet = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1_000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0_001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def _test_save_load_optional_components( self ):
        """Save/reload the pipeline with all optional components set to None
        and check the outputs stay (numerically) identical."""
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , F"`{optional_component}` did not stay set to None after loading." , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )

    def _test_save_load_local( self ):
        """Round-trip the pipeline through save_pretrained/from_pretrained and
        check the outputs stay (numerically) identical."""
        components = self._get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
            pipe_loaded.to(torch_device )
            pipe_loaded.set_progress_bar_config(disable=None )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1e-4 )
| 60 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece fixture model used by the tokenizer tests below.
SCREAMING_SNAKE_CASE :Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( TokenizerTesterMixin , unittest.TestCase ):
    """XGLM tokenization tests (slow SentencePiece + fast Rust tokenizers).

    Fixes over the previous revision: the base class was the undefined
    `snake_case` (the imported ``TokenizerTesterMixin`` was never used), the
    four mixin class attributes all shadowed one mangled name, ``setUp`` and
    the ``big_tokenizer`` cached property were mangled (so ``setUp`` never ran
    and ``self.big_tokenizer`` reads failed), all test methods shared one
    mangled name, and locals were bound to mangled names while reads used the
    real identifiers.
    """

    # Class attributes consumed by TokenizerTesterMixin.
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SCREAMING_SNAKE_CASE , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )

    def test_convert_token_and_id( self ):
        """Token <-> id round trip through the private conversion helpers."""
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 1_008 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )

    def test_full_tokenizer( self ):
        tokenizer = XGLMTokenizer(SCREAMING_SNAKE_CASE , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    @cached_property
    def big_tokenizer( self ):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M" )

    def test_picklable_without_disk( self ):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SCREAMING_SNAKE_CASE , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        # Unpickling must work even after the backing file is gone.
        pickle.loads(pickled_tokenizer )

    def test_rust_and_python_full_tokenizers( self ):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

    @slow
    def test_tokenization_base_easy_symbols( self ):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31_227, 4_447, 35]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )

    @slow
    def test_tokenization_base_hard_symbols( self ):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )

    @slow
    def test_tokenizer_integration( self ):
        # fmt: off
        expected_encoding = {
            "input_ids": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
            "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
        }  # noqa: E501
        # fmt: on
        # NOTE(review): padding=False restored from the upstream XGLM test —
        # the placeholder value here was undefined; confirm.
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="facebook/xglm-564M" , padding=False , )
| 60 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCamelCase ( __A ):
    """Unconditional image-generation pipeline for score-based generative
    modelling through SDEs (the variance-exploding, "SDE-VE", variant).

    NOTE(review): the base class ``__A`` and the two placeholder class
    attributes below come from obfuscated upstream code (originally
    ``DiffusionPipeline`` and the ``unet``/``scheduler`` type annotations);
    confirm against the diffusers ``ScoreSdeVePipeline``.
    """
    lowerCamelCase__ =42
    lowerCamelCase__ =42

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler) -> None:
        """Register the denoising model and SDE-VE scheduler on the pipeline.

        The original signature declared both parameters under the single name
        ``a`` (a SyntaxError); the canonical names are restored.
        """
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Sample ``batch_size`` images via predictor-corrector SDE sampling.

        Returns an ``ImagePipelineOutput`` (or a 1-tuple when
        ``return_dict=False``).  The original body bound every intermediate to
        ``SCREAMING_SNAKE_CASE`` while reading the real names (``shape``,
        ``sample``, ``model`` ...), which made it a cascade of NameErrors.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # Start from pure noise scaled by the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction (annealed Langevin) steps
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        # Report the noise-free mean, clamped to [0, 1], as NHWC numpy.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
'''simple docstring'''
from functools import lru_cache
def lowercase ( __magic_name__ ):
    """Return the set of distinct prime factors of ``__magic_name__``.

    Trial division up to sqrt(n); whatever remains greater than 1 after
    dividing out all small factors is itself prime.  (The original body read
    the undefined names ``i``, ``n`` and ``factors`` — NameError at runtime.)

    >>> sorted(lowercase(100))
    [2, 5]
    """
    n = __magic_name__
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        # The leftover co-factor is prime.
        factors.add(n)
    return factors
@lru_cache
def lowercase ( __magic_name__ ):
    """Number of distinct prime factors of ``__magic_name__`` (memoised)."""
    # NOTE(review): `unique_prime_factors` is not defined under that name in
    # this file (the factorisation helper above was renamed during
    # obfuscation), so this raises NameError when called — confirm the
    # intended helper binding.
    return len(unique_prime_factors(__magic_name__ ) )
def lowercase ( __magic_name__ ):
    """Return True when every element of ``__magic_name__`` is identical
    (an empty iterable counts as equal)."""
    distinct_values = set(__magic_name__)
    return len(distinct_values) <= 1
def lowercase ( __magic_name__ ):
    """Return the first run of ``__magic_name__`` consecutive integers that all
    have ``__magic_name__`` distinct prime factors (Project Euler 47 search).

    Relies on the sibling helpers ``upf_len`` (distinct-prime-factor count) and
    ``equality`` (all-elements-equal).  (The original body read the undefined
    names ``base``, ``group`` and ``checker``, and passed the wrong argument to
    ``equality`` — NameError / wrong result at runtime.)
    """
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(__magic_name__)]
        # Run elements through our upf_len function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(__magic_name__)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1
def lowercase ( __magic_name__ = 4 ):
    """Project Euler 47 entry point.

    Returns the first of ``__magic_name__`` consecutive integers having that
    many distinct prime factors, or ``None`` if the search returned nothing.
    (The original body read the undefined name ``results`` and took ``len`` of
    the argument instead of the result list.)
    """
    results = run(__magic_name__)
    return results[0] if len(results) else None
if __name__ == "__main__":
    # The Euler-47 entry point above is bound to the name `lowercase`;
    # `solution` does not exist in this module (NameError in the original).
    print(lowercase())
| 311 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()  # make torch ops deterministic for the pixel-value assertions below
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Fast, CPU-only tests for ``LDMTextToImagePipeline`` using tiny dummy models.

    NOTE(review): obfuscation damage in this block —
    * all methods share the name ``_lowerCAmelCase``, so only the last
      definition survives on the class, and the internal calls
      ``self.get_dummy_components()`` / ``self.get_dummy_inputs(...)`` are
      unresolved;
    * the fixture method below declares two parameters under the repeated name
      ``_SCREAMING_SNAKE_CASE`` (a SyntaxError);
    * locals are bound to ``snake_case_`` while later lines read the original
      names (``components``, ``generator``, ``inputs``, ``pipe`` ...);
    * the class attributes are all bound to the single name ``A``.
    Restore the original names before this block can run.
    """
    A : str = LDMTextToImagePipeline
    A : Optional[int] = TEXT_TO_IMAGE_PARAMS - {
        'negative_prompt',
        'negative_prompt_embeds',
        'cross_attention_kwargs',
        'prompt_embeds',
    }
    A : Dict = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'callback',
        'callback_steps',
    }
    A : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
    A : Any = False
    # Builds the tiny unet / scheduler / vae / text-encoder fixture dict.
    def _lowerCAmelCase ( self ) -> Tuple:
        torch.manual_seed(0 )
        snake_case_ : int = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        snake_case_ : List[Any] = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , )
        torch.manual_seed(0 )
        snake_case_ : Any = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
        torch.manual_seed(0 )
        snake_case_ : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        snake_case_ : Tuple = CLIPTextModel(_SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        snake_case_ : Optional[int] = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    # Deterministic call kwargs for the pipeline (duplicate-parameter SyntaxError here).
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Tuple:
        if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
            snake_case_ : Optional[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
        else:
            snake_case_ : Any = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
        snake_case_ : Any = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    # End-to-end smoke test: 16x16 output image and a pinned pixel slice.
    def _lowerCAmelCase ( self ) -> Tuple:
        snake_case_ : Dict = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : int = self.get_dummy_components()
        snake_case_ : str = LDMTextToImagePipeline(**_SCREAMING_SNAKE_CASE )
        pipe.to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        snake_case_ : int = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
        snake_case_ : Tuple = pipe(**_SCREAMING_SNAKE_CASE ).images
        snake_case_ : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        snake_case_ : str = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration test against ``CompVis/ldm-text2im-large-256``.

    NOTE(review): the three methods below all share the name
    ``_lowerCAmelCase`` (only the last survives, so ``self.get_inputs`` is
    unresolved); the fixture method declares three parameters under one
    repeated name (a SyntaxError); ``torch.floataa`` is presumably
    ``torch.float32`` — confirm and restore the original names before running.
    """
    # tearDown replacement: free GPU memory between tests.
    def _lowerCAmelCase ( self ) -> Optional[int]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Deterministic latents + call kwargs fixture (SyntaxError: duplicate params).
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 ) -> List[Any]:
        snake_case_ : Optional[Any] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 32, 32) )
        snake_case_ : Dict = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
        snake_case_ : Optional[int] = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    # 3-step sampling run with a pinned slice of output pixels.
    def _lowerCAmelCase ( self ) -> Optional[Any]:
        snake_case_ : Union[str, Any] = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        snake_case_ : str = self.get_inputs(_SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = pipe(**_SCREAMING_SNAKE_CASE ).images
        snake_case_ : Optional[int] = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        snake_case_ : Optional[Any] = np.array([0.5_1825, 0.5_2850, 0.5_2543, 0.5_4258, 0.5_2304, 0.5_2569, 0.5_4363, 0.5_5276, 0.5_6878] )
        snake_case_ : Union[str, Any] = np.abs(expected_slice - image_slice ).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Nightly GPU test: 50-step sampling compared to a recorded reference image.

    NOTE(review): same obfuscation damage as the class above — shared method
    name ``_lowerCAmelCase``, duplicate parameter names in the fixture
    (SyntaxError), and locals bound to ``snake_case_`` while later lines read
    the original names.  Restore the original names before running.
    """
    # tearDown replacement: free GPU memory between tests.
    def _lowerCAmelCase ( self ) -> Tuple:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Deterministic latents + call kwargs fixture (SyntaxError: duplicate params).
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=torch.floataa , _SCREAMING_SNAKE_CASE=0 ) -> List[str]:
        snake_case_ : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
        snake_case_ : Union[str, Any] = np.random.RandomState(_SCREAMING_SNAKE_CASE ).standard_normal((1, 4, 32, 32) )
        snake_case_ : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    # Full 50-step run compared element-wise against a stored reference array.
    def _lowerCAmelCase ( self ) -> Dict:
        snake_case_ : Optional[int] = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(_SCREAMING_SNAKE_CASE )
        pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
        snake_case_ : str = self.get_inputs(_SCREAMING_SNAKE_CASE )
        snake_case_ : Union[str, Any] = pipe(**_SCREAMING_SNAKE_CASE ).images[0]
        snake_case_ : Any = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
        snake_case_ : str = np.abs(expected_image - image ).max()
        assert max_diff < 1e-3
| 36 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Tokenizer test-suite for ``DebertaTokenizer`` / ``DebertaTokenizerFast``.

    NOTE(review): obfuscation damage in this block — every method is named
    ``_lowerCAmelCase`` (only the last definition survives on the class), the
    class attributes are all bound to the single name ``A``, and locals are
    bound to ``snake_case_`` while later lines read the original names
    (``vocab_tokens``, ``merges``, ``tokenizer``, ``kwargs``, ``tokd`` ...),
    which makes the bodies NameError cascades.  Restore the original method
    and local names before this suite can run.
    """
    A : Union[str, Any] = DebertaTokenizer
    A : List[Any] = True
    A : Dict = DebertaTokenizerFast
    # Writes a tiny BPE vocab + merges file pair into a temp dir.
    def _lowerCAmelCase ( self ) -> Tuple:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ : Union[str, Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        snake_case_ : Optional[Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
        snake_case_ : Dict = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        snake_case_ : Optional[int] = {"unk_token": "[UNK]"}
        snake_case_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        snake_case_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(_SCREAMING_SNAKE_CASE ) )
    # Builds a tokenizer from the temp-dir fixture.
    def _lowerCAmelCase ( self , **_SCREAMING_SNAKE_CASE ) -> int:
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
    # Standard (input, expected-output) text pair for the common mixin.
    def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
        snake_case_ : int = "lower newer"
        snake_case_ : Dict = "lower newer"
        return input_text, output_text
    # Tokenize + id-conversion round trip on the toy vocab.
    def _lowerCAmelCase ( self ) -> str:
        snake_case_ : Tuple = self.get_tokenizer()
        snake_case_ : str = "lower newer"
        snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        snake_case_ : str = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
        self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        snake_case_ : Dict = tokens + [tokenizer.unk_token]
        snake_case_ : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
    # token_type_ids for a text pair.
    def _lowerCAmelCase ( self ) -> Optional[Any]:
        snake_case_ : List[str] = self.get_tokenizer()
        snake_case_ : str = tokenizer("Hello" , "World" )
        snake_case_ : List[Any] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"] , _SCREAMING_SNAKE_CASE )
    # encode/decode must agree with build_inputs_with_special_tokens.
    @slow
    def _lowerCAmelCase ( self ) -> List[Any]:
        snake_case_ : str = self.tokenizer_class.from_pretrained("microsoft/deberta-base" )
        snake_case_ : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE )
        snake_case_ : int = tokenizer.encode("multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE )
        snake_case_ : Optional[int] = tokenizer.encode(
            "sequence builders" , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
        snake_case_ : str = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE )
        snake_case_ : str = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
        snake_case_ : List[str] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    # Full integration against recorded encodings for both backends.
    @slow
    def _lowerCAmelCase ( self ) -> Tuple:
        snake_case_ : Optional[Any] = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class )
        for tokenizer_class in tokenizer_classes:
            snake_case_ : str = tokenizer_class.from_pretrained("microsoft/deberta-base" )
            snake_case_ : int = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            snake_case_ : Any = tokenizer(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE )
            snake_case_ : Union[str, Any] = [tokenizer.decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) for seq in encoding["input_ids"]]
            # fmt: off
            snake_case_ : List[Any] = {
                "input_ids": [
                    [1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on
            snake_case_ : List[str] = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            self.assertDictEqual(encoding.data , _SCREAMING_SNAKE_CASE )
            for expected, decoded in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
                self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 36 | 1 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_UpperCamelCase = abspath(join(dirname(dirname(__file__)), '''src'''))
# Put the repo's `src` directory ahead of installed packages.  The insert
# previously referenced the undefined name `git_repo_path` (NameError at
# collection time); use the variable actually assigned above.
sys.path.insert(1, _UpperCamelCase)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def lowercase_ ( lowerCAmelCase__ : Optional[Any] ):
    """pytest hook: forward shared report options to diffusers' helper.

    ``lowerCAmelCase__`` is pytest's option ``parser`` object.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(lowerCAmelCase__ )
def lowercase_ ( lowerCAmelCase__ : Any ):
    """pytest hook: when ``--make-reports`` is set, emit the shared diffusers
    terminal-summary reports.

    ``lowerCAmelCase__`` is pytest's ``terminalreporter`` plugin object.  The
    original body read the undefined name ``terminalreporter`` (NameError) and
    passed the reporter itself as the report ``id``.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = lowerCAmelCase__.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(lowerCAmelCase__ , id=make_reports )
| 254 |
'''simple docstring'''
def lowercase_ ( lowerCAmelCase__ : int = 50 ):
"""simple docstring"""
__UpperCAmelCase : Optional[int] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
    # The solver above is bound to the name `lowercase_`; `solution` does not
    # exist in this module (NameError in the original).
    print(F'{lowercase_() = }')
| 254 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case__ : Optional[Any] = random.Random()  # shared module-level RNG
# NOTE(review): the helper below reads this RNG via the undefined name
# `global_rng` — confirm the intended module-level binding.
if is_torch_available():
    import torch
def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[int]=1.0 , _snake_case : Optional[Any]=None , _snake_case : Any=None ):
if rng is None:
lowerCAmelCase : Tuple = global_rng
lowerCAmelCase : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class snake_case_( unittest.TestCase ):
    """Config/fixture factory used by the AST feature-extraction tests below.

    NOTE(review): ``__init__`` and the speech-input helper declare several
    parameters under one repeated name (``UpperCamelCase_``) — that is a
    SyntaxError; locals are also bound to ``lowerCAmelCase`` while attribute
    assignments were clearly intended (``self.batch_size`` etc.).  Restore the
    canonical parameter/attribute names before this class can even be parsed.
    """
    def __init__(self : int , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Optional[int]=7 , UpperCamelCase_ : Optional[int]=4_0_0 , UpperCamelCase_ : Optional[int]=2_0_0_0 , UpperCamelCase_ : Any=1 , UpperCamelCase_ : str=0.0 , UpperCamelCase_ : int=1_6_0_0_0 , UpperCamelCase_ : int=True , UpperCamelCase_ : Optional[int]=True , ):
        lowerCAmelCase : Any = parent
        lowerCAmelCase : Tuple = batch_size
        lowerCAmelCase : Union[str, Any] = min_seq_length
        lowerCAmelCase : Any = max_seq_length
        lowerCAmelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        lowerCAmelCase : Optional[int] = feature_size
        lowerCAmelCase : str = padding_value
        lowerCAmelCase : List[str] = sampling_rate
        lowerCAmelCase : Dict = return_attention_mask
        lowerCAmelCase : Any = do_normalize
    # Kwargs dict consumed by the feature-extractor constructor.
    def lowerCamelCase__ ( self : Tuple ):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    # Builds fake speech inputs, optionally equal-length and/or as numpy arrays.
    def lowerCamelCase__ ( self : Any , UpperCamelCase_ : Any=False , UpperCamelCase_ : Any=False ):
        def _flatten(UpperCamelCase_ : Dict ):
            return list(itertools.chain(*__SCREAMING_SNAKE_CASE ) )
        if equal_length:
            lowerCAmelCase : str = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            lowerCAmelCase : Optional[int] = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            lowerCAmelCase : Optional[int] = [np.asarray(__SCREAMING_SNAKE_CASE ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class snake_case_( _A , unittest.TestCase ):
    """Tests for ``ASTFeatureExtractor``: batching equivalence, dtype handling
    and an integration check on librispeech samples.

    NOTE(review): all methods share the name ``lowerCamelCase__`` (only the
    last survives on the class) and locals are bound to ``lowerCAmelCase``
    while later lines read the original names (``feat_extract``,
    ``speech_inputs``, ``np_speech_inputs`` ...).  Restore the original method
    and local names before this suite can run.
    """
    __UpperCamelCase = ASTFeatureExtractor
    # setUp replacement: attach the fixture factory.
    def lowerCamelCase__ ( self : Optional[int] ):
        lowerCAmelCase : int = ASTFeatureExtractionTester(self )
    # Single vs. batched vs. 2-D-numpy inputs must produce identical features.
    def lowerCamelCase__ ( self : Dict ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        lowerCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        lowerCAmelCase : Union[str, Any] = [np.asarray(__SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
        # Test not batched input
        lowerCAmelCase : str = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        lowerCAmelCase : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
        # Test batched
        lowerCAmelCase : List[Any] = feat_extract(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_values
        lowerCAmelCase : int = feat_extract(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        lowerCAmelCase : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        lowerCAmelCase : str = np.asarray(__SCREAMING_SNAKE_CASE )
        lowerCAmelCase : Optional[int] = feat_extract(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_values
        lowerCAmelCase : Optional[int] = feat_extract(__SCREAMING_SNAKE_CASE , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
            self.assertTrue(np.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=1E-3 ) )
    # Padding must return float32 regardless of numpy/torch return type.
    @require_torch
    def lowerCamelCase__ ( self : Optional[Any] ):
        import torch
        lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lowerCAmelCase : List[str] = np.random.rand(1_0_0 ).astype(np.floataa )
        lowerCAmelCase : Tuple = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowerCAmelCase : Optional[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            lowerCAmelCase : Optional[Any] = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    # Loads `num_samples` decoded librispeech clips for the integration test.
    def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Dict ):
        from datasets import load_dataset
        lowerCAmelCase : int = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        lowerCAmelCase : Optional[int] = ds.sort('''id''' ).select(range(__SCREAMING_SNAKE_CASE ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    # Integration: pinned slice of log-mel features for one real clip.
    @require_torch
    def lowerCamelCase__ ( self : Dict ):
        # fmt: off
        lowerCAmelCase : Optional[Any] = torch.tensor(
            [-0.9_894, -1.2_776, -0.9_066, -1.2_776, -0.9_349, -1.2_609, -1.0_386, -1.2_776,
             -1.1_561, -1.2_776, -1.2_052, -1.2_723, -1.2_190, -1.2_132, -1.2_776, -1.1_133,
             -1.1_953, -1.1_343, -1.1_584, -1.2_203, -1.1_770, -1.2_474, -1.2_381, -1.1_936,
             -0.9_270, -0.8_317, -0.8_049, -0.7_706, -0.7_565, -0.7_869] )
        # fmt: on
        lowerCAmelCase : Optional[Any] = self._load_datasamples(1 )
        lowerCAmelCase : List[Any] = ASTFeatureExtractor()
        lowerCAmelCase : Any = feature_extractor(__SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 1_0_2_4, 1_2_8) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 363 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _snake_case ( _snake_case : Dict ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4e00 and cp <= 0X9fff)
or (cp >= 0X3400 and cp <= 0X4dbf) #
or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
or (cp >= 0Xf900 and cp <= 0Xfaff)
or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
): #
return True
return False
def _snake_case ( _snake_case : str ):
    """Return 1 if every character of the word is a CJK ideograph, else 0.

    Words like '180' contain non-CJK characters and return 0.  (The original
    body read the undefined name ``word``, took ``ord`` of the whole word, and
    passed the whole word to the per-code-point check.)
    """
    # word like '180' or '身高' or '神'
    word = _snake_case
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def _snake_case ( _snake_case : List[str] ):
    """Collect the multi-character, all-CJK tokens from ``_snake_case``.

    Returns the de-duplicated tokens as a list.  (The original body read the
    undefined names ``word_set`` and checked ``len`` of the whole token list.)
    """
    tokens = _snake_case
    word_set = set()
    for token in tokens:
        # Only tokens longer than one char made entirely of CJK chars qualify.
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def _snake_case ( bert_tokens : List[str] , chinese_word_set : set ):
    """Prefix continuation pieces of whole chinese words with ``##`` in place.

    Scans ``bert_tokens`` left to right; whenever the longest match starting at
    the current position is a word in ``chinese_word_set``, every character of
    that word except the first is rewritten as ``"##" + char``.

    The original signature declared both parameters under the single name
    ``_snake_case`` (a SyntaxError); canonical names are restored.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Longest candidate word that could start at this position.
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # Mark every char after the first as a continuation piece.
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def _snake_case ( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer ):
    """Compute whole-word-mask reference positions for each input line.

    For every line, segments it with LTP to find chinese words, tokenizes it
    with the BERT tokenizer, then returns — per line — the token indices that
    are single-character ``##`` continuation pieces of a chinese word.

    The original signature declared all three parameters under the single name
    ``_snake_case`` (a SyntaxError); canonical names are restored.
    """
    ltp_res = []
    # Segment in chunks of 100 lines to bound memory.
    for i in range(0 , len(lines) , 100 ):
        res = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0 , len(lines) , 100 ):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512 )
        bert_res.extend(res['''input_ids'''] )
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res ):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens , chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def _snake_case ( _snake_case : Dict ):
    """Read the input corpus, compute whole-word-mask reference ids and write
    them to ``args.save_path`` as JSON lines.

    ``_snake_case`` is the parsed argparse namespace.  (The original body read
    the undefined names ``args`` and ``data`` — NameError at runtime.)
    """
    args = _snake_case
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , '''r''' , encoding='''utf-8''' ) as f:
        data = f.readlines()
    # Drop empty / whitespace-only lines (avoids delimiters like '\u2029').
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp )  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert )
    ref_ids = prepare_ref(data , ltp_tokenizer , bert_tokenizer )
    with open(args.save_path , '''w''' , encoding='''utf-8''' ) as f:
        data = [json.dumps(ref) + '''\n''' for ref in ref_ids]
        f.writelines(data )
if __name__ == "__main__":
    # The parser/args bindings previously went to throwaway obfuscated names
    # while `parser`, `args` and `main` were read undefined (NameError); the
    # entry point defined above is bound to `_snake_case`.
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
    parser.add_argument(
        '''--file_name''',
        type=str,
        default='''./resources/chinese-demo.txt''',
        help='''file need process, same as training data in lm''',
    )
    parser.add_argument(
        '''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
    )
    parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
    parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
    args = parser.parse_args()
    _snake_case(args)
| 314 | 0 |
class UpperCAmelCase :
    """Prefix-sum helper over a list of ints.

    ``self.prefix_sum`` is built once in O(n); range-sum queries and
    subarray-sum membership tests then run in O(1) / O(n).

    NOTE(review): both query methods below share the name
    ``_SCREAMING_SNAKE_CASE`` (kept for interface compatibility), so on the
    class the second definition (subarray-sum membership) shadows the first
    (range sum).
    """

    def __init__(self : Optional[int] , snake_case__ : list[int] ) -> None:
        """Precompute the inclusive prefix sums of ``snake_case__``.

        (The original body read the undefined names ``len_array`` and
        ``self.prefix_sum`` before binding them — NameError at runtime.)
        """
        len_array = len(snake_case__ )
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = snake_case__[0]
        for i in range(1 , len_array ):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + snake_case__[i]

    def _SCREAMING_SNAKE_CASE (self , start : int , end : int ) -> int:
        """Sum of the original array over [start, end] inclusive, in O(1).

        (The original signature declared both parameters under one repeated
        name — a SyntaxError; canonical names restored.)
        """
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def _SCREAMING_SNAKE_CASE (self , snake_case__ : int ) -> bool:
        """Return True if some contiguous subarray sums to ``snake_case__``.

        Classic prefix-sum trick: such a subarray exists iff two prefix sums
        (including the empty prefix 0) differ by the target.
        """
        target_sum = snake_case__
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item )
        return False
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 59 |
from __future__ import annotations
# Type alias for a 9x9 sudoku board. The original obfuscated source assigned
# the alias and both grids to the same name, clobbering each other, while the
# driver code below reads `initial_grid` and `no_solution`.
Matrix = list[list[int]]

# assigning initial values to the grid (0 marks an empty cell)
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` can be placed at (row, column).

    Checks the cell's row, column and 3x3 box for an existing ``n``.
    (Renamed from the obfuscated ``UpperCamelCase``: the solver calls
    ``is_safe``.)
    """
    for i in range(9):
        # Row and column check.
        if grid[row][i] == n or grid[i][column] == n:
            return False

    # 3x3 box check; (row - row % 3, column - column % 3) is the box origin.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the (row, column) of the first empty cell (value 0), or None.

    Scans row-major; ``None`` means the grid is completely filled.
    (Renamed from the obfuscated ``UpperCamelCase``: the solver calls
    ``find_empty_location``.)
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking.

    Returns the (mutated) grid on success, or ``None`` when no digit
    assignment satisfies the constraints. (Renamed from the obfuscated
    ``UpperCamelCase``: the recursive call and the driver use ``sudoku``.)
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # Undo the tentative placement before trying the next digit.
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid one row per line, cells separated by spaces.

    Fixes the original, which printed the whole grid for every cell
    (``print(grid, end=" ")`` instead of ``print(cell, end=" ")``), and
    renames the obfuscated ``UpperCamelCase`` to the name the driver calls.
    """
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        # The original assigned the result to a dummy name and then read an
        # undefined `solution`; bind it properly.
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCamelCase(unittest.TestCase):
    """Unit tests for ``CLIPSegProcessor`` (CLIP tokenizer + ViT image processor).

    The original obfuscated source named every method ``_a`` (each definition
    shadowed the previous one) while the test bodies called
    ``self.get_tokenizer()`` etc.; the methods are restored to the names the
    bodies actually use, and ``setUp``/``tearDown`` regain their unittest hooks.
    """

    def setUp(self):
        """Write a tiny BPE vocab/merges and an image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow CLIP tokenizer built from the temp-dir fixtures."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) CLIP tokenizer built from the temp-dir fixtures."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """ViT image processor built from the temp-dir fixtures."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moveaxis -> HWC)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 166 |
"""simple docstring"""
def a__(number: int) -> bool:
    """Return True if ``number`` is automorphic (its square ends in its digits).

    Raises:
        TypeError: if ``number`` is not an ``int``.

    The original obfuscated body named the parameter ``lowerCAmelCase`` while
    reading ``number``, and called ``isinstance(x, x)`` (a TypeError).
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of the number and of its square.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 166 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the argument parser for the ``accelerate test`` command.

    Args:
        subparsers: optional argparse subparsers object; when given, the
            command is registered as a ``test`` subcommand, otherwise a
            standalone parser is created.

    Returns:
        The configured ``argparse.ArgumentParser``.

    (The obfuscated original named the parameter ``__snake_case`` while the
    body read ``subparsers``, used the parameter as the ``--config_file``
    default, and was called as ``test_command_parser`` below.)
    """
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')

    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )

    if subparsers is not None:
        # When used as a subcommand, dispatch to test_command (defined below).
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Run the accelerate sanity-check script via ``accelerate-launch``.

    Args:
        args: parsed namespace with an optional ``config_file`` attribute.

    (The obfuscated original named the parameter ``__snake_case`` while the
    body read ``args``; restored so the function actually works.)
    """
    # Path to test_utils/scripts/test_script.py relative to this file.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'

    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')
def main():
    """Entry point: parse CLI arguments and run the accelerate test command.

    (The obfuscated original assigned the parser to a dummy name and then
    read undefined ``parser``; restored, and renamed to ``main`` which the
    ``__main__`` guard below calls.)
    """
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
# Execute the CLI entry point when run as a script.
if __name__ == "__main__":
    main()
| 75 |
'''simple docstring'''
# Value/symbol pairs in descending order, including the subtractive forms
# (CM, CD, XC, ...). Named ROMAN because int_to_roman below iterates `ROMAN`;
# the obfuscated original bound it to a dummy name, leaving ROMAN undefined.
ROMAN = [
    (1_000, 'M'),
    (900, 'CM'),
    (500, 'D'),
    (400, 'CD'),
    (100, 'C'),
    (90, 'XC'),
    (50, 'L'),
    (40, 'XL'),
    (10, 'X'),
    (9, 'IX'),
    (5, 'V'),
    (4, 'IV'),
    (1, 'I'),
]
def roman_to_int(roman: str) -> int:
    """Convert a Roman numeral string to an integer.

    Handles subtractive notation by looking one symbol ahead: when a smaller
    value precedes a larger one (e.g. ``IV``), the pair is consumed at once.

    (The obfuscated original bound the lookup table and the input to dummy
    names while reading ``vals``/``roman``, and shared its name with the
    next function, which shadowed it.)
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            # Subtractive pair such as IV, IX, XL, CM.
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """Convert a positive integer to its Roman numeral string.

    Greedily consumes the descending (value, symbol) pairs from the
    module-level ``ROMAN`` table.

    (The obfuscated original unpacked ``divmod(number, number)`` into a
    single dummy name, losing both the factor and the remainder; restored
    to ``factor, number = divmod(number, arabic)``.)
    """
    result = []
    for arabic, roman in ROMAN:
        # factor = how many times this symbol fits; number = remainder.
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure for the WavLM model. The obfuscated original assigned
# both the dict and the torch-only list to the same dummy name (clobbering the
# dict) and then passed an undefined `_import_structure` to _LazyModule.
_import_structure = {'configuration_wavlm': ['WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WavLMConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is installed.
    _import_structure['modeling_wavlm'] = [
        'WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'WavLMForAudioFrameClassification',
        'WavLMForCTC',
        'WavLMForSequenceClassification',
        'WavLMForXVector',
        'WavLMModel',
        'WavLMPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wavlm import (
            WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            WavLMForAudioFrameClassification,
            WavLMForCTC,
            WavLMForSequenceClassification,
            WavLMForXVector,
            WavLMModel,
            WavLMPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 351 |
def find_minimum_change(denominations, value) -> list[int]:
    """Greedily make change for ``value`` using ``denominations``.

    Args:
        denominations: coin values in ascending order.
        value: amount to change; may be an int or a numeric string.

    Returns:
        The list of coins used, largest first.

    (The obfuscated original declared two parameters with the same name —
    a SyntaxError — and read an unbound ``total_value``; the driver below
    calls ``find_minimum_change``.)
    """
    total_value = int(value)

    # Traverse denominations from largest to smallest.
    answer = []
    for denomination in reversed(denominations):
        # Take as many coins of this denomination as fit.
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
# Driver Code
if __name__ == "__main__":
    # The obfuscated original assigned every input to the dummy name `__a`
    # while reading `denominations`, `n`, `value` and `answer`; restored.
    denominations = []
    value = '0'

    if (
        input('Do you want to enter your denominations ? (yY/n): ').strip().lower()
        == "y"
    ):
        n = int(input('Enter the number of denominations you want to add: ').strip())

        for i in range(0, n):
            denominations.append(int(input(f'Denomination {i}: ').strip()))
        value = input('Enter the change you want to make in Indian Currency: ').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
        value = input('Enter the change you want to make: ').strip()

    if int(value) == 0 or int(value) < 0:
        print('The total value cannot be zero or negative.')
    else:
        print(f'Following is minimal change for {value}: ')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=' ')
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher: print the decryption for every key 0-25.

    Non-uppercase-letter characters pass through unchanged.

    (The obfuscated original named the parameter ``A`` while the body read
    ``message``, and ``main`` below calls ``decrypt``.)
    """
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                # Wrap around the alphabet when shifting below 'A'.
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"""Decryption using Key #{key}: {translated}""")
def main() -> None:
    """Read an encrypted message from stdin and print all candidate decryptions.

    (The obfuscated original assigned the input to a dummy name while
    reading ``message``; the ``__main__`` guard below calls ``main``.)
    """
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
# Run the doctests, then the interactive entry point, when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 107 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map from canonical checkpoint name to its hosted config file. The
# obfuscated original assigned this to the same dummy name as the logger,
# clobbering it.
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class snake_case__(PretrainedConfig):
    """Configuration class for the original OpenAI GPT model.

    Defaults match the ``openai-gpt`` checkpoint. The obfuscated original
    inherited from an undefined ``_UpperCamelCase`` (should be the imported
    ``PretrainedConfig``), assigned ``model_type`` and ``attribute_map`` to
    one shared name, and gave every ``__init__`` parameter the same name
    (a SyntaxError); all restored here.
    """

    model_type = "openai-gpt"
    # Map generic config attribute names onto GPT's historical field names.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_04_78,
        n_positions=5_12,
        n_embd=7_68,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store the model hyperparameters and forward ``kwargs`` to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 107 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase(unittest.TestCase):
    """Unit tests for ``AlignProcessor`` (BERT tokenizer + EfficientNet image processor).

    The original obfuscated source named every method ``UpperCAmelCase_``
    (each definition shadowed the previous one) while the test bodies called
    ``self.get_tokenizer()`` etc.; the methods are restored to the names the
    bodies actually use, and ``setUp``/``tearDown`` regain their unittest hooks.
    """

    def setUp(self):
        """Write a tiny WordPiece vocab and an image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow BERT tokenizer built from the temp-dir fixtures."""
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) BERT tokenizer built from the temp-dir fixtures."""
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """EfficientNet image processor built from the temp-dir fixtures."""
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moveaxis -> HWC)."""
        image_inputs = [np.random.randint(2_55, size=(3, 30, 4_00), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 123 |
'''simple docstring'''
def UpperCamelCase(number: int) -> int:
    """Return the ``number``-th entry (1-indexed) of the Catalan sequence.

    Computed iteratively via C_i = C_{i-1} * (4i - 2) // (i + 1), so the
    result equals the (number - 1)-th Catalan number: 1, 1, 2, 5, 14, ...

    Raises:
        TypeError: if ``number`` is not an ``int``.
        ValueError: if ``number`` < 1.

    (The obfuscated original assigned locals to a dummy name while reading
    ``number``/``current_number``, so every call raised NameError.)
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
# Run the module's doctests when this file is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
'''simple docstring'''
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Any , a : int , a : int ):
"""simple docstring"""
__lowerCamelCase = jnp.ones((batch_size, length) ) / length
return scores
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = None
__lowerCamelCase = 20
__lowerCamelCase = self._get_uniform_logits(batch_size=2 , length=a )
# tweak scores to not be uniform anymore
__lowerCamelCase = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch
__lowerCamelCase = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch
# compute softmax
__lowerCamelCase = jax.nn.softmax(a , axis=-1 )
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=0.5 )
__lowerCamelCase = FlaxTemperatureLogitsWarper(temperature=1.3 )
__lowerCamelCase = jax.nn.softmax(temp_dist_warper_sharper(a , scores.copy() , cur_len=a ) , axis=-1 )
__lowerCamelCase = jax.nn.softmax(temp_dist_warper_smoother(a , scores.copy() , cur_len=a ) , axis=-1 )
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1e-3 ) )
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1e-3 ) )
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() )
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() )
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() )
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = None
__lowerCamelCase = 10
__lowerCamelCase = 2
# create ramp distribution
__lowerCamelCase = np.broadcast_to(np.arange(a )[None, :] , (batch_size, vocab_size) ).copy()
__lowerCamelCase = ramp_logits[1:, : vocab_size // 2] + vocab_size
__lowerCamelCase = FlaxTopKLogitsWarper(3 )
__lowerCamelCase = top_k_warp(a , a , cur_len=a )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
__lowerCamelCase = 5
__lowerCamelCase = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
__lowerCamelCase = np.broadcast_to(np.arange(a )[None, :] , (batch_size, length) ).copy()
__lowerCamelCase = top_k_warp_safety_check(a , a , cur_len=a )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = None
__lowerCamelCase = 10
__lowerCamelCase = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
__lowerCamelCase = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
__lowerCamelCase = FlaxTopPLogitsWarper(0.8 )
__lowerCamelCase = np.exp(top_p_warp(a , a , cur_len=a ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
__lowerCamelCase = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(a , a , atol=1e-3 ) )
# check edge cases with negative and extreme logits
__lowerCamelCase = np.broadcast_to(np.arange(a )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
__lowerCamelCase = ramp_logits[1] * 1_00.0
# make sure at least 2 tokens are kept
__lowerCamelCase = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
__lowerCamelCase = top_p_warp(a , a , cur_len=a )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a )
# check that min length is applied at length 5
__lowerCamelCase = ids_tensor((batch_size, 20) , vocab_size=20 )
__lowerCamelCase = 5
__lowerCamelCase = self._get_uniform_logits(a , a )
__lowerCamelCase = min_dist_processor(a , a , cur_len=a )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float('''inf''' )] )
# check that min length is not applied anymore at length 15
__lowerCamelCase = self._get_uniform_logits(a , a )
__lowerCamelCase = 15
__lowerCamelCase = min_dist_processor(a , a , cur_len=a )
self.assertFalse(jnp.isinf(a ).any() )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = 20
__lowerCamelCase = 4
__lowerCamelCase = 0
__lowerCamelCase = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a )
# check that all scores are -inf except the bos_token_id score
__lowerCamelCase = ids_tensor((batch_size, 1) , vocab_size=20 )
__lowerCamelCase = 1
__lowerCamelCase = self._get_uniform_logits(a , a )
__lowerCamelCase = logits_processor(a , a , cur_len=a )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero
# check that bos_token_id is not forced if current length is greater than 1
__lowerCamelCase = 3
__lowerCamelCase = self._get_uniform_logits(a , a )
__lowerCamelCase = logits_processor(a , a , cur_len=a )
self.assertFalse(jnp.isinf(a ).any() )
def test_forced_eos_token_logits_processor(self):
    """Check FlaxForcedEOSTokenLogitsProcessor forces EOS only when max_length is reached."""
    vocab_size = 20
    batch_size = 4
    eos_token_id = 0
    max_length = 5

    logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

    # check that all scores are -inf except the eos_token_id when max_length is reached
    input_ids = ids_tensor((batch_size, 4), vocab_size=20)
    cur_len = 4
    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores = logits_processor(input_ids, scores, cur_len=cur_len)
    self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
    self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

    # check that eos_token_id is not forced if max_length is not reached
    cur_len = 3
    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores = logits_processor(input_ids, scores, cur_len=cur_len)
    self.assertFalse(jnp.isinf(scores).any())
def test_processor_list(self):
    """Check FlaxLogitsProcessorList produces the same scores as applying each processor in turn."""
    batch_size = 4
    sequence_length = 10
    vocab_size = 15
    eos_token_id = 2
    bos_token_id = 1
    max_length = 15

    # dummy input_ids and scores
    input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
    input_ids_comp = input_ids.copy()

    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores_comp = scores.copy()

    # instantiate all dist processors
    temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
    top_k_warp = FlaxTopKLogitsWarper(3)
    top_p_warp = FlaxTopPLogitsWarper(0.8)

    # instantiate all logits processors
    min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
    bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
    eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

    cur_len = 10

    # no processor list
    scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
    scores = top_k_warp(input_ids, scores, cur_len=cur_len)
    scores = top_p_warp(input_ids, scores, cur_len=cur_len)
    scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
    scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
    scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

    # with processor list
    processor = FlaxLogitsProcessorList(
        [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
    )
    scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

    # scores should be equal
    self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

    # input_ids should never be changed
    self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
def test_processor_list_jitted(self):
    """Same as test_processor_list, but under jax.jit to make sure the processors are traceable."""
    batch_size = 4
    sequence_length = 10
    vocab_size = 15
    eos_token_id = 2
    bos_token_id = 1
    max_length = 15

    # dummy input_ids and scores
    input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
    input_ids_comp = input_ids.copy()

    scores = self._get_uniform_logits(batch_size, vocab_size)
    scores_comp = scores.copy()

    # instantiate all dist processors
    temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
    top_k_warp = FlaxTopKLogitsWarper(3)
    top_p_warp = FlaxTopPLogitsWarper(0.8)

    # instantiate all logits processors
    min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
    bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
    eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

    cur_len = 10

    # no processor list
    def run_no_processor_list(input_ids, scores, cur_len):
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        return scores

    # with processor list
    def run_processor_list(input_ids, scores, cur_len):
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores = processor(input_ids, scores, cur_len=cur_len)
        return scores

    jitted_run_no_processor_list = jax.jit(run_no_processor_list)
    jitted_run_processor_list = jax.jit(run_processor_list)

    scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
    scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

    # scores should be equal
    self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))

    # input_ids should never be changed
    self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 67 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
# Module logger; referenced as `logger` by MultiControlNetModel.from_pretrained below.
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Multiple `ControlNetModel` wrapper class for Multi-ControlNet.

    This module wraps several instances of `ControlNetModel`; its `forward()` API is designed to be
    compatible with `ControlNetModel` and sums the residuals produced by each controlnet.

    Args:
        controlnets (`List[ControlNetModel]` or `Tuple[ControlNetModel]`):
            The controlnets providing additional conditioning during denoising.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        """Run every controlnet on its own (image, scale) pair and sum the residual samples."""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        """Save each wrapped controlnet under `save_directory`, `save_directory_1`, `save_directory_2`, ..."""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        """Load controlnets from `pretrained_model_path`, `pretrained_model_path_1`, ... until a directory is missing."""
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
| 67 | 1 |
from typing import Any
class Node:
    """A single node of a singly linked list, holding arbitrary data."""

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # set when the node is linked into a list

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion and reversal."""

    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        # Yields the *data* stored in each node, head to tail.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at `index`; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at `index`; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert `data` before position `index` (0 == head, len(self) == tail)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove the node at `index` and return its data; raises IndexError when out of range."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by re-pointing every `next` link."""
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercise the integer API: insert/delete at head, tail and arbitrary index, indexing, reverse."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise the list with heterogeneous data (ints, floats, strings, Nodes, None)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo: build a list from stdin, mutate it and print it after each step."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 356 |
from __future__ import annotations
from random import random
class Node:
    """Treap node: stores a value plus a random priority used for heap ordering."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()  # heap priority drawn uniformly from [0, 1)
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (left, right) where left holds values <= `value` and right the rest."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # `value` lies in the left subtree: split it and re-attach the right part.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # `value` lies in the right subtree: split it and re-attach the left part.
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps (all values in `left` <= all in `right`), keeping the heap property."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # `left` has the smaller priority, so it becomes the root.
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` into the treap and return the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)
def erase(root: Node | None, value: int) -> Node | None:
    """Remove all nodes equal to `value` and return the new root."""
    # Split off everything < value, then drop the slice equal to value.
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted (in-order) sequence, comma-separated."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated command string ("+N" insert, "-N" erase) and return the new root."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    """Interactive REPL: read command lines from stdin until 'q', printing the treap after each."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 198 | 0 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """Delete fairseq-only entries from `state_dict` in place; missing keys are ignored."""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # Use a default so absent keys do not raise.
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free nn.Linear that shares its weight tensor with embedding `emb` (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    # Share the embedding weights so the output projection is tied to the input embeddings.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """Load a fairseq XGLM checkpoint from `checkpoint_path` and return an equivalent XGLMForCausalLM."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    # fairseq uses a `decoder.` prefix where HF uses `model.`
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    # Tie the LM head to the token embeddings.
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 316 |
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
# Type variables for the hash map's key and value types; referenced by _Item and HashMap below.
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """Immutable key/value pair stored in a hash-map bucket."""

    key: KEY
    val: VAL
class _DeletedItem(_Item):
    """Falsy sentinel marking a bucket whose item was deleted, so probe chains stay intact."""

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False
# Shared tombstone instance compared by identity inside HashMap (`item is _deleted`).
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing, tombstones and automatic resizing."""

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        """Home bucket for `key`."""
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        """Next bucket in the linear probe sequence (wraps around)."""
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Store (key, val) at bucket `ind` if it is free or already holds `key`; return success."""
        stored = self._buckets[ind]
        if not stored:  # empty slot or tombstone
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:  # overwrite existing entry
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        """True when the load factor reached `capacity_factor` and the table must grow."""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        """True when the table may shrink (load below half the capacity factor)."""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        """Rebuild the table with `new_size` buckets, re-inserting every live item."""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:  # skips empty slots and (falsy) tombstones
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        """Yield the probe sequence for `key`, at most one full pass over the table."""
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                # Replace with the tombstone so later probes keep walking past this slot.
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item
        )
        return f"HashMap({val_string})"
| 316 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds small Swinv2 configs/inputs and runs shape checks on behalf of the unittest class."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,  # was misspelled `path_norm`, which the config silently ignored
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # Patch grid shrinks by 2x per stage; hidden width doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for the Swinv2 backbone and its task heads."""

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def setUp(self):
    """Create the shared model tester and config tester used by every test."""
    self.model_tester = SwinvaModelTester(self)
    self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)
def test_config(self):
    """Run the standard round-trip and initialization checks on SwinvaConfig."""
    self.config_tester.create_and_test_config_to_json_string()
    self.config_tester.create_and_test_config_to_json_file()
    self.config_tester.create_and_test_config_from_and_save_pretrained()
    self.config_tester.create_and_test_config_with_num_labels()
    self.config_tester.check_config_can_be_init_without_params()
    self.config_tester.check_config_arguments_init()
def test_model(self):
    """Forward pass of the bare backbone with the tester's shape checks."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
def test_multi_gpu_data_parallel_forward(self):
    pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds")
def test_inputs_embeds(self):
    pass
def test_model_common_attributes(self):
    """Input embeddings must be an nn.Module; output embeddings are None or an nn.Linear."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    for model_class in self.all_model_classes:
        model = model_class(config)
        self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
        x = model.get_output_embeddings()
        self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
    """Every model's forward must take `pixel_values` as its first argument."""
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()

    for model_class in self.all_model_classes:
        model = model_class(config)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]

        expected_arg_names = ["pixel_values"]
        self.assertListEqual(arg_names[:1], expected_arg_names)
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Attentions must be returned (via kwarg and via config flag), have one
        entry per stage, be shaped (num_heads, window**2, window**2), and stay
        last in the output tuple when hidden states are also requested."""
        __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : Optional[int] = True
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Optional[Any] = True
            __UpperCAmelCase : Any = False
            __UpperCAmelCase : Tuple = True
            __UpperCAmelCase : str = model_class(UpperCAmelCase_ )
            model.to(UpperCAmelCase_ )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
            __UpperCAmelCase : Union[str, Any] = outputs.attentions
            # one attention tensor per depth/stage
            __UpperCAmelCase : Optional[int] = len(self.model_tester.depths )
            self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __UpperCAmelCase : Tuple = True
            __UpperCAmelCase : List[str] = config.window_size**2
            __UpperCAmelCase : Union[str, Any] = model_class(UpperCAmelCase_ )
            model.to(UpperCAmelCase_ )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
            __UpperCAmelCase : Dict = outputs.attentions
            self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            __UpperCAmelCase : List[str] = len(UpperCAmelCase_ )
            # Check attention is always last and order is fine
            __UpperCAmelCase : Tuple = True
            __UpperCAmelCase : Dict = True
            __UpperCAmelCase : Dict = model_class(UpperCAmelCase_ )
            model.to(UpperCAmelCase_ )
            model.eval()
            with torch.no_grad():
                __UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
            if hasattr(self.model_tester , "num_hidden_states_types" ):
                __UpperCAmelCase : int = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                __UpperCAmelCase : Optional[Any] = 2
            self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase_ ) )
            __UpperCAmelCase : List[str] = outputs.attentions
            self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int ):
        """Helper: run one model and verify hidden_states / reshaped_hidden_states
        counts and per-layer shapes (num_patches x embed_dim)."""
        __UpperCAmelCase : Optional[int] = model_class(UpperCAmelCase_ )
        model.to(UpperCAmelCase_ )
        model.eval()
        with torch.no_grad():
            __UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
        __UpperCAmelCase : Dict = outputs.hidden_states
        # stages + 1 (embedding output), unless the tester overrides it
        __UpperCAmelCase : Dict = getattr(
            self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
        # Swinv2 has a different seq_length
        __UpperCAmelCase : str = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __UpperCAmelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        __UpperCAmelCase : str = outputs.reshaped_hidden_states
        self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
        # flatten (B, C, H, W) back to (B, H*W, C) and compare to hidden_states shape
        __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = reshaped_hidden_states[0].shape
        __UpperCAmelCase : Dict = (
            reshaped_hidden_states[0].view(UpperCAmelCase_ , UpperCAmelCase_ , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Check hidden-state outputs both via the forward kwarg and via the config flag."""
        __UpperCAmelCase , __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : Any = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            __UpperCAmelCase : List[str] = True
            self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCAmelCase : Union[str, Any] = True
            self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
    def lowerCamelCase_ ( self : Optional[int] ):
        """Same hidden-states check, but with an image size padded up to the
        next multiple of the patch size."""
        __UpperCAmelCase , __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : str = 3
        __UpperCAmelCase : Optional[Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __UpperCAmelCase : int = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # round each spatial dim up past the patch boundary
        __UpperCAmelCase : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __UpperCAmelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            __UpperCAmelCase : Dict = True
            self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __UpperCAmelCase : List[Any] = True
            self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width) )
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Run the masked-image-modeling head check from the model tester."""
        __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ )
    def lowerCamelCase_ ( self : str ):
        """Run the image-classification head check from the model tester."""
        __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
    @slow
    def lowerCamelCase_ ( self : Union[str, Any] ):
        """Smoke-test loading the first pretrained Swinv2 checkpoint from the hub."""
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __UpperCAmelCase : int = SwinvaModel.from_pretrained(UpperCAmelCase_ )
            self.assertIsNotNone(UpperCAmelCase_ )
    def lowerCamelCase_ ( self : Dict ):
        """With a zero-init config, every trainable non-embedding, non-logit_scale
        parameter must have mean 0.0 or 1.0 (rounded to 1e-9)."""
        __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __UpperCAmelCase : Any = _config_zero_init(UpperCAmelCase_ )
        for model_class in self.all_model_classes:
            __UpperCAmelCase : str = model_class(config=UpperCAmelCase_ )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run the pretrained tiny Swinv2 classifier on the
    COCO cats fixture image and compare the logits with recorded values."""

    @cached_property
    def lowerCamelCase_ ( self : Optional[int] ):
        """Image processor for the tiny Swinv2 checkpoint (None without vision deps)."""
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCamelCase_ ( self : Optional[int] ):
        """End-to-end forward pass: logits shape must be (1, 1000) and the first
        three values must match the reference tensor within 1e-4."""
        __UpperCAmelCase : Union[str, Any] = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
            UpperCAmelCase_ )
        __UpperCAmelCase : List[str] = self.default_image_processor
        __UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        __UpperCAmelCase : Any = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
        # forward pass
        with torch.no_grad():
            __UpperCAmelCase : Dict = model(**UpperCAmelCase_ )
        # verify the logits
        __UpperCAmelCase : Any = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
        __UpperCAmelCase : str = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(UpperCAmelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
| 37 |
'''simple docstring'''
from heapq import heappop, heappush
import numpy as np
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, ):
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = grid.shape
__UpperCAmelCase : List[str] = [-1, 1, 0, 0]
__UpperCAmelCase : Optional[Any] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__UpperCAmelCase , __UpperCAmelCase : Tuple = [(0, source)], set()
__UpperCAmelCase : Any = np.full((rows, cols), np.inf )
__UpperCAmelCase : List[str] = 0
__UpperCAmelCase : Union[str, Any] = np.empty((rows, cols), dtype=_UpperCAmelCase )
__UpperCAmelCase : Any = None
while queue:
((__UpperCAmelCase) , (__UpperCAmelCase)) : Optional[Any] = heappop(_UpperCAmelCase )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__UpperCAmelCase : int = []
while (x, y) != source:
path.append((x, y) )
__UpperCAmelCase , __UpperCAmelCase : Tuple = predecessors[x, y]
path.append(_UpperCAmelCase ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(_UpperCAmelCase ) ):
__UpperCAmelCase , __UpperCAmelCase : int = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__UpperCAmelCase : Optional[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(_UpperCAmelCase, (dist + 1, (nx, ny)) )
__UpperCAmelCase : List[str] = dist + 1
__UpperCAmelCase : int = (x, y)
return np.inf, []
if __name__ == "__main__":
    import doctest

    # Run any doctests embedded in this module when executed directly.
    doctest.testmod()
| 37 | 1 |
'''simple docstring'''
def snake_case_ (_a : dict ):
    """Print the length (in vertices) of the longest path in a DAG.

    Uses Kahn's topological ordering; ``long_dist[v]`` is the number of
    vertices on the longest path ending at ``v``. The original body read the
    module-level global ``graph`` instead of the parameter and appended the
    whole graph (not the vertex) to the queue — both fixed here.

    Args:
        _a: adjacency list mapping each vertex ``0..n-1`` to its successors.

    Returns:
        None; the maximum distance is printed.
    """
    indegree = [0] * len(_a )
    queue = []
    long_dist = [1] * len(_a )

    # count incoming edges per vertex
    for values in _a.values():
        for i in values:
            indegree[i] += 1

    # seed the queue with all source vertices
    for i in range(len(_a ) ):
        if indegree[i] == 0:
            queue.append(i )

    while queue:
        vertex = queue.pop(0 )
        for x in _a[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )

    print(max(long_dist ) )
# Adjacency list of Graph
A = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
# Run on the sample DAG. The original called the pre-obfuscation names
# `longest_distance(graph)`, both undefined here (NameError at import).
snake_case_(A)
| 34 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowerCamelCase__ ( xlm_checkpoint_path , pytorch_dump_folder_path):
    """Convert an original XLM checkpoint into the Transformers layout.

    Writes the weights file, ``config.json`` and the vocabulary file into
    ``pytorch_dump_folder_path``. Fixes in this version: the original
    declared the same parameter name twice (a SyntaxError), filtered config
    entries with ``isinstance`` on the parameter instead of the value, and
    printed the config path when announcing the vocab file.
    """
    # Load checkpoint
    chkpt = torch.load(xlm_checkpoint_path , map_location="cpu")

    state_dict = chkpt["model"]

    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v

    config = chkpt["params"]
    # keep only JSON-serialisable params (drop tensors/arrays)
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray))}

    vocab = chkpt["dico_word2id"]
    # BPE pieces: strip "@@" from continuation pieces; append "</w>" to
    # word-final pieces, skipping the first 14 special symbols (i <= 13).
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@" , ""): i for s, i in vocab.items()}

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]

    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict , pytorch_weights_dump_path)

    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path , "w" , encoding="utf-8") as f:
        f.write(json.dumps(config , indent=2) + "\n")

    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path , "w" , encoding="utf-8") as f:
        f.write(json.dumps(vocab , indent=2) + "\n")
if __name__ == "__main__":
    # CLI entry point: both arguments are required paths. The original bound
    # the parser to `a_` but called `parser.add_argument` / read `args`, and
    # invoked an undefined `convert_xlm_checkpoint_to_pytorch` — all NameErrors.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    lowerCamelCase__(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _a ( unittest.TestCase ):
    """Pipeline tests for text2text generation over the PT and TF seq2seq mappings."""

    A = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    # NOTE(review): both mappings are bound to the same name `A` — obfuscation
    # artifact; originally these were two distinct class attributes.
    A = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int:
        """Instantiate the pipeline under test and return it with sample inputs."""
        UpperCAmelCase_: Tuple = TextaTextGenerationPipeline(model=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ )
        return generator, ["Something to write", "Something else"]

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Any:
        """Generic checks: output structure, batching, num_return_sequences, bad input type."""
        UpperCAmelCase_: Tuple = generator("""Something there""" )
        self.assertEqual(SCREAMING_SNAKE_CASE_, [{"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}] )
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
        UpperCAmelCase_: str = generator(["""This is great !""", """Something else"""], num_return_sequences=2, do_sample=SCREAMING_SNAKE_CASE_ )
        self.assertEqual(
            SCREAMING_SNAKE_CASE_, [
                [{"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}, {"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}],
                [{"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}, {"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}],
            ], )
        UpperCAmelCase_: Optional[int] = generator(
            ["""This is great !""", """Something else"""], num_return_sequences=2, batch_size=2, do_sample=SCREAMING_SNAKE_CASE_ )
        self.assertEqual(
            SCREAMING_SNAKE_CASE_, [
                [{"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}, {"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}],
                [{"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}, {"""generated_text""": ANY(SCREAMING_SNAKE_CASE_ )}],
            ], )
        with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
            generator(4 )

    @require_torch
    def __snake_case (self ) -> List[str]:
        """PyTorch backend: deterministic decoding, beams, tensor returns, eos/pad handling."""
        UpperCAmelCase_: int = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""pt""" )
        # do_sample=False necessary for reproducibility
        UpperCAmelCase_: Optional[int] = generator("""Something there""", do_sample=SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_, [{"""generated_text""": """"""}] )
        UpperCAmelCase_: int = 3
        UpperCAmelCase_: Dict = generator(
            """Something there""", num_return_sequences=SCREAMING_SNAKE_CASE_, num_beams=SCREAMING_SNAKE_CASE_, )
        UpperCAmelCase_: int = [
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
            {"""generated_text""": """"""},
        ]
        self.assertEqual(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
        UpperCAmelCase_: str = generator("""This is a test""", do_sample=SCREAMING_SNAKE_CASE_, num_return_sequences=2, return_tensors=SCREAMING_SNAKE_CASE_ )
        self.assertEqual(
            SCREAMING_SNAKE_CASE_, [
                {"""generated_token_ids""": ANY(torch.Tensor )},
                {"""generated_token_ids""": ANY(torch.Tensor )},
            ], )
        UpperCAmelCase_: List[Any] = generator.model.config.eos_token_id
        UpperCAmelCase_: Optional[Any] = """<pad>"""
        UpperCAmelCase_: List[Any] = generator(
            ["""This is a test""", """This is a second test"""], do_sample=SCREAMING_SNAKE_CASE_, num_return_sequences=2, batch_size=2, return_tensors=SCREAMING_SNAKE_CASE_, )
        self.assertEqual(
            SCREAMING_SNAKE_CASE_, [
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
                [
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                    {"""generated_token_ids""": ANY(torch.Tensor )},
                ],
            ], )

    @require_tf
    def __snake_case (self ) -> Optional[Any]:
        """TensorFlow backend smoke test with deterministic decoding."""
        UpperCAmelCase_: int = pipeline("""text2text-generation""", model="""patrickvonplaten/t5-tiny-random""", framework="""tf""" )
        # do_sample=False necessary for reproducibility
        UpperCAmelCase_: Tuple = generator("""Something there""", do_sample=SCREAMING_SNAKE_CASE_ )
        self.assertEqual(SCREAMING_SNAKE_CASE_, [{"""generated_text""": """"""}] )
| 82 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _a :
    """Synthetic config/input factory plus shape assertions for Mask2Former tests."""

    def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=64, ) -> Union[str, Any]:
        # NOTE(review): obfuscation collapsed all parameter names; the RHS
        # names below show the intended attribute for each positional default.
        UpperCAmelCase_: int = parent
        UpperCAmelCase_: Tuple = batch_size
        UpperCAmelCase_: int = is_training
        UpperCAmelCase_: Any = use_auxiliary_loss
        UpperCAmelCase_: str = num_queries
        UpperCAmelCase_: List[Any] = num_channels
        UpperCAmelCase_: Union[str, Any] = min_size
        UpperCAmelCase_: Optional[Any] = max_size
        UpperCAmelCase_: Tuple = num_labels
        UpperCAmelCase_: Union[str, Any] = hidden_dim
        UpperCAmelCase_: int = hidden_dim

    def __snake_case (self ) -> Tuple:
        """Create random pixel values, mask, binary mask labels and class labels."""
        UpperCAmelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            SCREAMING_SNAKE_CASE_ )
        UpperCAmelCase_: List[str] = torch.ones([self.batch_size, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ )
        UpperCAmelCase_: Tuple = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ ) > 0.5
        ).float()
        UpperCAmelCase_: Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
        UpperCAmelCase_: Union[str, Any] = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def __snake_case (self ) -> Any:
        """Build a tiny Mask2Former config sized by this tester's dimensions."""
        UpperCAmelCase_: Any = MaskaFormerConfig(
            hidden_size=self.hidden_dim, )
        UpperCAmelCase_: Any = self.num_queries
        UpperCAmelCase_: Dict = self.num_labels
        UpperCAmelCase_: Dict = [1, 1, 1, 1]
        UpperCAmelCase_: int = self.num_channels
        UpperCAmelCase_: Union[str, Any] = 64
        UpperCAmelCase_: List[Any] = 128
        UpperCAmelCase_: Optional[Any] = self.hidden_dim
        UpperCAmelCase_: str = self.hidden_dim
        UpperCAmelCase_: List[str] = self.hidden_dim
        return config

    def __snake_case (self ) -> Union[str, Any]:
        """Return (config, inputs_dict) for the common model tests."""
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = self.prepare_config_and_inputs()
        UpperCAmelCase_: Any = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
        """Assert the hidden-state collections have the expected lengths."""
        UpperCAmelCase_: Union[str, Any] = output.encoder_hidden_states
        UpperCAmelCase_: int = output.pixel_decoder_hidden_states
        UpperCAmelCase_: Any = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), config.decoder_layers )

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
        """Forward-pass the base model and verify last-hidden-state shapes."""
        with torch.no_grad():
            UpperCAmelCase_: Dict = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
            model.to(SCREAMING_SNAKE_CASE_ )
            model.eval()
            UpperCAmelCase_: List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
            UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        """Forward-pass the segmentation head with and without labels; check logits and loss."""
        UpperCAmelCase_: Tuple = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()

        def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            UpperCAmelCase_: Dict = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
            UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_ )
            comm_check_on_output(SCREAMING_SNAKE_CASE_ )
            UpperCAmelCase_: Dict = model(
                pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
            comm_check_on_output(SCREAMING_SNAKE_CASE_ )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
    """Common model/pipeline test-suite specialisation for Mask2Former."""

    A = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    # NOTE(review): successive class attributes all named `A` — obfuscation
    # artifact; originally distinct mapping/flag attributes.
    A = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
    A = False
    A = False
    A = False
    A = False

    def __snake_case (self ) -> Any:
        """Create the model tester and a config tester (no text modality)."""
        UpperCAmelCase_: List[str] = MaskaFormerModelTester(self )
        UpperCAmelCase_: Any = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )

    def __snake_case (self ) -> List[Any]:
        """Run the shared configuration checks."""
        self.config_tester.run_common_tests()

    def __snake_case (self ) -> Optional[Any]:
        """Forward-pass the base model with hidden states enabled."""
        UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )

    def __snake_case (self ) -> Union[str, Any]:
        """Forward-pass the universal-segmentation head."""
        UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )

    @unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
    def __snake_case (self ) -> Dict:
        pass

    @unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
    def __snake_case (self ) -> Optional[int]:
        pass

    @unittest.skip(reason="""Mask2Former is not a generative model""" )
    def __snake_case (self ) -> List[str]:
        pass

    @unittest.skip(reason="""Mask2Former does not use token embeddings""" )
    def __snake_case (self ) -> Union[str, Any]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def __snake_case (self ) -> List[str]:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def __snake_case (self ) -> Dict:
        pass

    def __snake_case (self ) -> Any:
        """forward() must take `pixel_values` as its first argument."""
        UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
            UpperCAmelCase_: Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_: Tuple = [*signature.parameters.keys()]
            UpperCAmelCase_: str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )

    @slow
    def __snake_case (self ) -> List[Any]:
        """Smoke-test loading the small COCO-instance checkpoint."""
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            UpperCAmelCase_: Any = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )

    def __snake_case (self ) -> Union[str, Any]:
        """A forward pass with labels must produce a loss."""
        UpperCAmelCase_: str = (self.model_tester.min_size,) * 2
        UpperCAmelCase_: str = {
            """pixel_values""": torch.randn((2, 3, *size), device=SCREAMING_SNAKE_CASE_ ),
            """mask_labels""": torch.randn((2, 10, *size), device=SCREAMING_SNAKE_CASE_ ),
            """class_labels""": torch.zeros(2, 10, device=SCREAMING_SNAKE_CASE_ ).long(),
        }
        UpperCAmelCase_: Dict = self.model_tester.get_config()
        UpperCAmelCase_: Optional[Any] = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
        UpperCAmelCase_: List[Any] = model(**SCREAMING_SNAKE_CASE_ )
        self.assertTrue(outputs.loss is not None )

    def __snake_case (self ) -> List[Any]:
        """Hidden-states variant of the base-model check."""
        UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )

    def __snake_case (self ) -> List[Any]:
        """Attentions must be returned when requested."""
        UpperCAmelCase_ , UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_: List[Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
            UpperCAmelCase_: Any = model(**SCREAMING_SNAKE_CASE_, output_attentions=SCREAMING_SNAKE_CASE_ )
            self.assertTrue(outputs.attentions is not None )

    def __snake_case (self ) -> Optional[int]:
        """Training step must backpropagate through the segmentation loss."""
        if not self.model_tester.is_training:
            return
        UpperCAmelCase_: Union[str, Any] = self.all_model_classes[1]
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs()
        UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.train()
        UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ ).loss
        loss.backward()

    def __snake_case (self ) -> Optional[Any]:
        """Gradients must flow into every retained intermediate activation."""
        UpperCAmelCase_: Any = self.all_model_classes[1]
        UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
        UpperCAmelCase_: Union[str, Any] = True
        UpperCAmelCase_: str = True
        UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
        model.train()
        UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
        UpperCAmelCase_: Tuple = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        UpperCAmelCase_: Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        UpperCAmelCase_: Optional[int] = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        UpperCAmelCase_: Optional[Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the slow tensor-comparison tests below.
a : float = 1E-4
def lowerCAmelCase_ ():
    """Load the standard COCO cats fixture image used by the integration tests."""
    # NOTE(review): assigned name and returned name differ — obfuscation artifact.
    UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __snake_case (self ) -> Dict:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.default_image_processor
UpperCAmelCase_: Optional[Any] = prepare_img()
UpperCAmelCase_: str = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Dict = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: str = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Tuple = self.default_image_processor
UpperCAmelCase_: Dict = prepare_img()
UpperCAmelCase_: Any = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: int = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
UpperCAmelCase_: int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_: Optional[Any] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCAmelCase_: int = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
UpperCAmelCase_: Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_: Any = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
# Integration test: feed the model a dummy batch with segmentation maps so the
# training loss path runs, and assert a loss tensor is produced.
# NOTE(review): same mangling as above — `SCREAMING_SNAKE_CASE_` is undefined
# in view and presumably stood for torch_device / the model inputs.
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Dict = self.default_image_processor
# two all-zero images with all-zero segmentation maps as a minimal batch
UpperCAmelCase_: str = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors="""pt""", )
UpperCAmelCase_: int = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""mask_labels"""]]
UpperCAmelCase_: int = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCAmelCase_: Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
| 82 | 1 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
A: Optional[Any] = pd.read_csv("sample_data.csv", header=None)
A: List[str] = df.shape[:1][0]
# If you're using some other dataset input the target column
A: Optional[int] = df.iloc[:, 1:2]
A: int = actual_data.values.reshape(len_data, 1)
A: List[Any] = MinMaxScaler().fit_transform(actual_data)
A: Union[str, Any] = 1_0
A: str = 5
A: Union[str, Any] = 2_0
A: Union[str, Any] = len_data - periods * look_back
A: Optional[int] = actual_data[:division]
A: Tuple = actual_data[division - look_back :]
A , A: int = [], []
A , A: Tuple = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
A: Optional[int] = np.array(train_x)
A: Optional[Any] = np.array(test_x)
A: int = np.array([list(i.ravel()) for i in train_y])
A: str = np.array([list(i.ravel()) for i in test_y])
A: Dict = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
A: int = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
A: List[str] = model.predict(x_test)
| 109 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure for the ConvBERT model family.
# Bug fix: each optional-dependency branch below previously rebound one bare
# name, clobbering the earlier entries, and the final ``_LazyModule(...)`` call
# referenced an undefined ``_import_structure``. Restored the keyed dict and the
# ``sys.modules`` replacement used by every transformers ``__init__``.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 62 | 0 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    """Builds small RegNet configs and inputs for the TF model tests.

    Bug fix / NOTE(review): this block was machine-mangled — the class had been
    renamed away from ``TFRegNetModelTester`` (which the test class below
    instantiates), every ``__init__`` parameter shared one name (a SyntaxError),
    and argument references pointed at an undefined ``__UpperCAmelCase``.
    Names are restored from the visible defaults and attribute references;
    confirm against the upstream TF RegNet test file.
    """

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        # Random pixel values (and labels when use_labels) sized from the config.
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
# Model-level tests for TF RegNet (signature, hidden states, tuple/dict output
# equivalence, classification head, from_pretrained).
# NOTE(review): identifiers here are machine-mangled — the mixin base classes
# appear as the undefined `_lowerCAmelCase` (presumably TFModelTesterMixin and
# PipelineTesterMixin), every method is named `__magic_name__`, helper
# parameters repeat a single name (`lowerCAmelCase__`, a SyntaxError), and
# argument references point at the undefined `__UpperCAmelCase`. Comments below
# describe the apparent intent; confirm against the upstream test file.
class snake_case__ ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
# class-level mixin configuration: model classes, pipeline mapping, and
# capability flags (attention outputs, head masking, etc. — all disabled).
lowercase__ : Any = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
lowercase__ : Any = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
lowercase__ : List[str] = False
lowercase__ : List[Any] = False
lowercase__ : str = False
lowercase__ : str = False
lowercase__ : Any = False
# setUp: build the model tester and a ConfigTester for RegNetConfig.
def __magic_name__ ( self ) -> int:
__magic_name__ : List[Any] = TFRegNetModelTester(self )
__magic_name__ : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase )
def __magic_name__ ( self ) -> Dict:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def __magic_name__ ( self ) -> Union[str, Any]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def __magic_name__ ( self ) -> Optional[int]:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def __magic_name__ ( self ) -> Optional[Any]:
pass
# Checks that every model's call signature starts with `pixel_values`.
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ ,__magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(__UpperCAmelCase )
__magic_name__ : List[str] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Dict = [*signature.parameters.keys()]
__magic_name__ : Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
# Verifies hidden-state count (num_stages + 1) and spatial shape for both
# residual layer types, with output_hidden_states set via kwarg and config.
def __magic_name__ ( self ) -> Any:
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
__magic_name__ : Dict = model_class(__UpperCAmelCase )
__magic_name__ : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase )
__magic_name__ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__magic_name__ : Dict = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__magic_name__ ,__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Tuple = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__magic_name__ : Dict = layer_type
__magic_name__ : List[str] = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__magic_name__ : Any = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# Tuple vs dict output equivalence for every model class, with and without
# labels and with output_hidden_states enabled.
def __magic_name__ ( self ) -> Dict:
__magic_name__ ,__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__={} ):
__magic_name__ : Optional[int] = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase )
__magic_name__ : Union[str, Any] = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ).to_tuple()
def recursive_check(lowerCAmelCase__ , lowerCAmelCase__ ):
if isinstance(__UpperCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__UpperCAmelCase , __UpperCAmelCase ):
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__UpperCAmelCase , __UpperCAmelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
F' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
) , )
recursive_check(__UpperCAmelCase , __UpperCAmelCase )
for model_class in self.all_model_classes:
__magic_name__ : Dict = model_class(__UpperCAmelCase )
__magic_name__ : List[str] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__ : str = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__magic_name__ : Dict = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__magic_name__ : Tuple = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__magic_name__ : Optional[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
__magic_name__ : int = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"""output_hidden_states""": True} )
__magic_name__ : Any = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__magic_name__ : Optional[int] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {"""output_hidden_states""": True} )
def __magic_name__ ( self ) -> List[str]:
__magic_name__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase )
@slow
def __magic_name__ ( self ) -> Tuple:
# Smoke-test from_pretrained on the first published checkpoint.
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Dict = TFRegNetModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def prepare_img():
    """Load the standard COCO test image used by the integration tests below.

    Bug fix: restored the name ``prepare_img`` — the integration test calls
    ``prepare_img()`` but this helper had been renamed ``UpperCamelCase``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
# Slow integration test: run the published TF RegNet classifier on a real image
# and compare the first three logits against hard-coded reference values.
# NOTE(review): identifiers are machine-mangled (`__magic_name__` locals,
# undefined `__UpperCAmelCase` argument references) — presumably the latter
# stood for the processed inputs / expected tensors before mangling.
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Tuple:
# Image processor matching the first published checkpoint (None without vision).
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : Tuple = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__magic_name__ : Dict = self.default_image_processor
__magic_name__ : Dict = prepare_img()
__magic_name__ : int = image_processor(images=__UpperCAmelCase , return_tensors="""tf""" )
# forward pass
__magic_name__ : Optional[Any] = model(**__UpperCAmelCase , training=__UpperCAmelCase )
# verify the logits
__magic_name__ : Any = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCAmelCase )
__magic_name__ : str = tf.constant([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 )
| 354 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b):
    """Return the Euclidean distance between two equally sized vectors.

    Bug fix: restored the name ``euclidean`` (``similarity_search`` below calls
    it) and gave the two parameters distinct names — the mangled version
    declared ``(_A, _A)``, which is a SyntaxError.
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset, value_array):
    """For every vector in ``value_array`` find its nearest vector in ``dataset``.

    Returns a list of ``[nearest_vector_as_list, distance]`` pairs, one entry
    per row of ``value_array``.

    Bug fix: the mangled version declared duplicate parameters (SyntaxError)
    and referenced undefined locals (``dist``, ``temp_dist``, ``vector``);
    names restored from those references. The distance helper is nested so the
    function is self-contained.

    Raises:
        ValueError: if the arrays disagree in rank or vector length.
        TypeError: if they disagree in shape (1-D case) or dtype.
    """

    def _euclidean(vec_a, vec_b):
        # Euclidean distance between two equally sized vectors.
        return math.sqrt(sum(pow(a - b, 2) for a, b in zip(vec_a, vec_b)))

    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        # start with the first dataset row as the current best match
        dist = _euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = _euclidean(value, dataset_value)
            if dist > temp_dist:  # found a strictly closer vector
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a, input_b):
    """Return the cosine similarity (dot product over norm product) of two vectors.

    Bug fix: distinct parameter names — the mangled version declared
    ``(_A, _A)``, a SyntaxError — and the original name is restored.
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 138 | 0 |
"""simple docstring"""
def _snake_case ( _snake_case : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError('''List is empty''' )
lowerCAmelCase : Tuple = sum(_snake_case ) / len(_snake_case ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
"""simple docstring"""
import numpy as np
def _snake_case ( _snake_case : np.array ):
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 1 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
a_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
a_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> 
predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
a_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Per-class intersection/union areas for one (prediction, ground-truth) pair.

    Bug fix: the mangled version declared every parameter with one name
    (a SyntaxError) and bound every local to ``_A``; names are restored from
    the surviving references (``new_id``, ``area_pred_label`` ...), and the
    mangled ``np.floataa``-style tokens elsewhere in this metric map to real
    numpy dtypes.

    Returns:
        Tuple of four ``(num_labels,)`` arrays:
        (area_intersect, area_union, area_pred_label, area_label).
    """
    if label_map is not None:
        # remap ground-truth ids before comparing
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        # shift labels down by one; background (0) becomes the ignore value 255
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    # keep only pixels whose ground truth is not the ignore index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    # predicted values at the pixels where prediction matches ground truth
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate ``intersect_and_union`` areas over a batch of segmentation maps.

    Bug fix: parameters/locals restored from the surviving references — the
    mangled version declared duplicate parameters (SyntaxError) and rebound
    every accumulator to ``_A`` while updating ``total_area_intersect`` etc.
    ``np.floataa`` is restored to ``np.float64``.

    Returns:
        (total_area_intersect, total_area_union, total_area_pred_label,
        total_area_label), each of shape ``(num_labels,)``.
    """
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU / accuracies from batched predictions and ground truths.

    Bug fix: parameter and local names restored — the mangled version declared
    duplicate parameters (SyntaxError) and bound every metric to ``_A`` so the
    returned dict was never built. The metric key order (mean_iou,
    mean_accuracy, overall_accuracy, per_category_iou, per_category_accuracy)
    follows the original assignment order.

    Returns:
        dict with scalar ``mean_iou``, ``mean_accuracy``, ``overall_accuracy``
        and per-category ``ndarray`` entries; NaNs replaced when ``nan_to_num``
        is given.
    """
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
# `datasets.Metric` wrapper exposing the mean-IoU computation defined above.
# NOTE(review): mangled identifiers — `_CITATION` is referenced but the
# citation constant was renamed `a_` at the top of the snippet, the `_compute`
# parameters all share the name `a__` (a SyntaxError), and the result is bound
# to `_A` while `iou_result` is returned. Confirm against the upstream
# datasets mean_iou metric.
class snake_case ( datasets.Metric):
# Declares the metric schema: predictions/references are 2-D uint16 maps.
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
"references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16" ) ) ),
} ) , reference_urls=[
"https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
] , )
# Delegates to the module-level `mean_iou` and returns its metrics dict.
def a_ ( self : Union[str, Any] , a__ : str , a__ : str , a__ : int , a__ : bool , a__ : Optional[int] = None , a__ : Optional[Dict[int, int]] = None , a__ : bool = False , ) -> Dict:
'''simple docstring'''
_A = mean_iou(
results=a__ , gt_seg_maps=a__ , num_labels=a__ , ignore_index=a__ , nan_to_num=a__ , label_map=a__ , reduce_labels=a__ , )
return iou_result
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
# Bug fix: both constants had been mangled to the single name ``a_`` — the
# compliance check below reads ``INTERNAL_OPS`` (and the original file reads
# ``REPO_PATH``), so neither existed.
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    """Check that every op in a TF SavedModel is supported by the given ONNX opset.

    Bug fix: local names restored (the mangled version bound everything to
    ``_A`` while reading ``saved_model``, ``onnx_ops``, ``model_op_names``,
    ``incompatible_ops``), the function name matches the call in the
    ``__main__`` block, and the strict failure message now joins the op list
    instead of concatenating ``str + list`` (TypeError).

    Args:
        saved_model_path: path to the SavedModel ``.pb`` file.
        strict: raise on incompatible ops instead of just printing them.
        opset: highest ONNX opset to accept ops from.
    """
    # NOTE(review): the module-level import reads ``saved_model_pba`` — a
    # mangled/typo'd module name; the real protobuf module is imported here.
    from tensorflow.core.protobuf.saved_model_pb2 import SavedModel

    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    # collect every op supported up to and including the requested opset
    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
a_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset) | 163 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

# Bug fix: all four module constants had been mangled to the single name
# ``_snake_case`` — the tokenizer class below reads ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``,
# and the module logger is conventionally ``logger``.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from each byte value (0-255) to a printable unicode char.

    Printable bytes map to themselves; the remaining bytes are assigned
    characters starting at code point 256, so every byte gets a distinct,
    visible symbol for BPE.

    Bug fix: restored the name ``bytes_to_unicode`` (the tokenizer class calls
    it) and the locals ``bs``/``cs``/``n``, which the mangled version
    referenced but never defined.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols).

    Bug fix: restored the name ``get_pairs`` (the tokenizer's ``bpe`` method
    calls it) and the locals ``pairs``/``prev_char``, which the mangled
    version referenced but never defined.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self, __a, __a, __a="replace", __a="<s>", __a="</s>", __a="</s>", __a="<s>", __a="<unk>", __a="<pad>", __a="<mask>", __a=False, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else bos_token
_lowerCAmelCase : int = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else eos_token
_lowerCAmelCase : Union[str, Any] = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else sep_token
_lowerCAmelCase : Any = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else cls_token
_lowerCAmelCase : Dict = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else unk_token
_lowerCAmelCase : List[str] = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : Union[str, Any] = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else mask_token
super().__init__(
errors=__a, bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, cls_token=__a, pad_token=__a, mask_token=__a, add_prefix_space=__a, **__a, )
with open(__a, encoding="utf-8") as vocab_handle:
_lowerCAmelCase : int = json.load(__a)
_lowerCAmelCase : List[Any] = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Any = errors # how to handle errors in decoding
_lowerCAmelCase : int = bytes_to_unicode()
_lowerCAmelCase : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__a, encoding="utf-8") as merges_handle:
_lowerCAmelCase : int = merges_handle.read().split("\n")[1:-1]
_lowerCAmelCase : Any = [tuple(merge.split()) for merge in bpe_merges]
_lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_lowerCAmelCase : Tuple = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def snake_case__ ( self):
'''simple docstring'''
return len(self.encoder)
def snake_case__ ( self):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder)
def snake_case__ ( self, token):
    """Apply byte-pair encoding to a single pre-token.

    BUG FIX: the original assigned every intermediate value to a throwaway
    name while the logic read `word`, `pairs`, `bigram`, `first`, `second`,
    `new_word`, `i` and `j` — all unbound (NameError on first call) — and
    never stored the result back into `self.cache`.  Reconstructed to the
    standard GPT-2 merge loop that the surrounding code expects.

    Returns:
        str: the BPE sub-tokens of `token`, joined by single spaces.
    """
    if token in self.cache:
        return self.cache[token]
    word = tuple(token)
    pairs = get_pairs(word)
    if not pairs:
        return token
    while True:
        # Pick the lowest-ranked (most frequent) adjacent pair still mergeable.
        bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
        if bigram not in self.bpe_ranks:
            break
        first, second = bigram
        new_word = []
        i = 0
        while i < len(word):
            try:
                j = word.index(first, i)
            except ValueError:
                new_word.extend(word[i:])
                break
            else:
                new_word.extend(word[i:j])
                i = j
            if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
        if len(word) == 1:
            break
        else:
            pairs = get_pairs(word)
    word = " ".join(word)
    self.cache[token] = word
    return word
def snake_case__ ( self, text):
    """Tokenize a string into byte-level BPE sub-tokens.

    BUG FIX: the original bound `bpe_tokens` and the remapped token to
    throwaway names, so the subsequent reads of `bpe_tokens` and `token`
    raised NameError / used the raw token.  Reconstructed to the standard
    byte-level BPE tokenize loop.
    """
    bpe_tokens = []
    for token in re.findall(self.pat, text):
        # Map all bytes to unicode strings, avoiding control tokens of the BPE
        # (spaces in our case).
        token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
        bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
    return bpe_tokens
def snake_case__ ( self, token):
    """Map a token string to its id; unknown tokens map to the unk token's id."""
    fallback = self.encoder.get(self.unk_token)
    return self.encoder.get(token, fallback)
def snake_case__ ( self, index):
    """Map a vocabulary id back to its token string (None when the id is unknown)."""
    return self.decoder.get(index)
def snake_case__ ( self, tokens):
    """Convert a list of byte-level BPE tokens back into a plain string.

    BUG FIX: the original bound the joined string to a throwaway name, so the
    read of `text` on the next line was unbound (NameError).
    """
    text = "".join(tokens)
    # Undo the byte->unicode remapping, then decode as UTF-8.
    text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
    return text
def snake_case__ ( self, save_directory, filename_prefix = None):
    """Write the vocabulary (JSON) and BPE merges to `save_directory`.

    BUG FIX: the original signature declared `__a` twice (a SyntaxError), the
    path variables were bound to throwaway names, and the sort key read `kv`
    while the lambda parameter was `__a` (NameError).  Reconstructed from the
    names the body actually reads (`save_directory`, `filename_prefix`).

    Returns:
        tuple[str, str]: paths of the vocab file and merges file, or None when
        `save_directory` is not a directory (an error is logged instead).
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
    merge_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
    index = 0
    with open(merge_file, "w", encoding="utf-8") as writer:
        writer.write("#version: 0.2\n")
        # Merges must be written in rank order; warn if ranks are not contiguous.
        for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
            if index != token_index:
                logger.warning(
                    f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                    " Please check that the tokenizer is not corrupted!")
                index = token_index
            writer.write(" ".join(bpe_tokens) + "\n")
            index += 1
    return vocab_file, merge_file
def snake_case__ ( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False):
    """Return a mask marking special tokens (1) vs sequence tokens (0).

    BUG FIX: the original declared `__a` three times in the signature and
    passed the keyword `token_ids_a` twice in the super() call — both
    SyntaxErrors.  Reconstructed to the standard two-sequence form:
    `<s> A </s>` or `<s> A </s></s> B </s>`.
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def snake_case__ ( self, token_ids_0, token_ids_1 = None):
    """Return an all-zero token-type-id list sized for the special-token layout.

    BUG FIX: the original declared `__a` twice in the signature (SyntaxError);
    the second-parameter reads in the body fix the reconstruction.  This model
    does not use token types, so every position gets 0.
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def snake_case__ ( self, text, is_split_into_words=False, **kwargs):
    """Optionally prepend a space to `text` before tokenization.

    BUG FIX: the original declared `__a` twice (plus `**__a`) — a SyntaxError.
    Parameter names reconstructed from the body's reads of `text` / `kwargs`.

    Returns:
        tuple[str, dict]: the (possibly prefixed) text and remaining kwargs.
    """
    add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
    if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
        text = " " + text
    return (text, kwargs)
def snake_case__ ( self, token_ids_0, token_ids_1 = None):
    """Append the EOS token to a sequence.

    BUG FIX: the original declared `__a` twice (SyntaxError) while the body
    read `token_ids_a`.  Note the second sequence is intentionally ignored —
    this mirrors Blenderbot, which never builds paired inputs here.
    """
    return token_ids_0 + [self.eos_token_id]
def snake_case__ ( self, conversation):
    """Flatten a Conversation object into model input ids.

    BUG FIX: the original bound `inputs`, the joined string and the encoded
    ids to throwaway names, so the later reads of `inputs`, `full_string`
    and `input_ids` were unbound (NameError).
    """
    inputs = []
    for is_user, text in conversation.iter_texts():
        if is_user:
            # We need to space prefix as it's being done within blenderbot
            inputs.append(" " + text)
        else:
            # Generated responses should contain them already.
            inputs.append(text)
    full_string = " ".join(inputs)
    input_ids = self.encode(full_string)
    # Keep only the most recent tokens when the conversation is too long.
    if len(input_ids) > self.model_max_length:
        input_ids = input_ids[-self.model_max_length :]
        logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
    return input_ids
| 36 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration class for Wav2Vec2-style speech models.

    BUG FIX: the original declared every ``__init__`` parameter under the
    single duplicated name ``__a`` (a SyntaxError) while the body read the
    real parameter names, and subclassed the undefined name ``a`` even though
    ``PretrainedConfig`` is imported at the top of this file.  The signature
    below is reconstructed from those body reads and the original default
    values, in order.
    """

    # Restored from the corrupted class attribute; PretrainedConfig keys
    # configurations by `model_type`.
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Number of input samples per output frame (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 36 | 1 |
def A(x: str, y: str) -> tuple:
    """Return the length and one instance of the longest common subsequence of x and y.

    BUG FIX: the original declared the parameter ``__A`` twice (a SyntaxError)
    and bound every intermediate value (``m``, ``n``, ``l``, ``match``,
    ``seq``, ``i``, ``j``) to a single throwaway name while the logic read the
    real names.  Reconstructed from those reads; standard O(m*n) DP plus
    backtracking.

    Returns:
        tuple[int, str]: (LCS length, one LCS string).
    """
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # dp table: l[i][j] = LCS length of x[:i] and y[:j]
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)
    # Backtrack from the bottom-right corner to recover one LCS.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq
if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    # BUG FIX: the LCS function in this file is bound to the obfuscated name
    # `A`, not `longest_common_subsequence`; the original call raised NameError.
    ln, subseq = A(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 7 |
from maths.prime_factors import prime_factors
def A(number: int) -> int:
    """Return 1 if `number` has an even count of prime factors, -1 if odd.

    BUG FIX: the original body read the name ``number`` (in the f-string) and
    raised ``TypeError(__A)`` — the raw argument instead of the message — and
    checked ``isinstance(__A, __A)``.  Parameter renamed to match the body's
    reads and the error message text.

    Raises:
        TypeError: if `number` is not an integer.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    # `prime_factors` comes from the module-level import in this file.
    return -1 if len(prime_factors(number)) % 2 else 1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 7 | 1 |
def lowerCamelCase__(start, finish):
    """Greedy activity selection: print indices of a maximum set of
    non-overlapping activities, assuming `finish` is sorted ascending.

    BUG FIX: the original signature declared ``_a`` twice (a SyntaxError) and
    the body read the undefined names ``n``, ``i`` and ``j`` because every
    assignment was bound to a throwaway name.  Parameters renamed to match
    the body's reads of ``start[j]`` / ``finish[i]``.
    """
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected.
    i = 0
    print(i, end=",")
    # Select each later activity whose start is not before the last finish.
    for j in range(n):
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    # BUG FIX: the selector in this file is bound to the obfuscated name
    # `lowerCamelCase__`, not `print_max_activities` (NameError); the stray
    # dataset-delimiter text trailing the call was also removed.
    lowerCamelCase__(start, finish)
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase__:
    """Weighted *directed* graph stored as an adjacency dict: node -> [[w, v], ...].

    BUG FIX: in the original every assignment was bound to the throwaway name
    ``_UpperCAmelCase`` while the logic read the real names (``self.graph``,
    ``stack``, ``visited``, ``s``, ``ss``, ...) — a NameError on first use —
    and every method shared one obfuscated name, so later defs shadowed
    earlier ones.  Method names restored from the canonical algorithm.

    NOTE(review): a second class further down this file reuses the same
    obfuscated class name; at import time that later definition shadows this
    one.
    """

    def __init__(self):
        # adjacency map: u -> list of [weight, v] edges leaving u
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add a directed edge u -> v with weight w (duplicates ignored)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        """Return every known node."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge u -> v if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from s (first node when -2), stopping early at d."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random nodes (random count when c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from s (first node when -2)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Number of edges pointing at u."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Number of edges leaving u."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """Return nodes in DFS post-order (reverse topological order for a DAG)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return the nodes that participate in (or lead into) a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a back edge is found, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds for one DFS run."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds for one BFS run."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class lowerCAmelCase__:
    """Weighted *undirected* graph stored as an adjacency dict: node -> [[w, v], ...].

    BUG FIX: as with the directed class above in this file, every assignment
    was bound to the throwaway name ``_UpperCAmelCase`` while the logic read
    the real names, and every method shared one obfuscated name.  Method names
    restored from the canonical algorithm.

    NOTE(review): this class reuses the same obfuscated class name as the
    directed graph defined earlier in the file and shadows it at import time.
    """

    def __init__(self):
        # adjacency map: u -> list of [weight, v] edges incident on u
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add an undirected edge u <-> v with weight w (duplicates ignored)."""
        # if there already is an edge
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the edge between u and v (both directions) if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative depth-first search from s (first node when -2), stopping early at d."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(node[1])
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with c random nodes (random count when c == -1)."""
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal from s (first node when -2)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident on u."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return the nodes that participate in (or lead into) a cycle."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a back edge is found, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every known node."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock seconds for one DFS run."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock seconds for one BFS run."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 234 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _a(ProcessorMixin):
    """Processor wrapping a TVLT image processor and feature extractor.

    BUG FIX: the original subclassed the undefined name ``lowerCamelCase_``
    even though ``ProcessorMixin`` is imported at the top of this file, its
    three class attributes all shadowed one obfuscated name, and ``__call__``
    referenced the undefined name ``__snake_case`` throughout while its real
    parameter names appeared only in the body's conditionals.  Reconstructed
    from those body reads.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None,
                 mask_audio=False, mask_pixel=False, *args, **kwargs):
        """Prepare images and/or audio for the model; returns one merged dict."""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)
        # Merge in the original order: audio first, then images, then mixed images.
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """Deduplicated, order-preserving union of both sub-processors' input names."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 371 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : str = logging.get_logger(__name__)
UpperCamelCase_ : Optional[Any] = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _a(PretrainedConfig):
    """Configuration class for SEW speech models.

    BUG FIX: the original declared every ``__init__`` parameter under the
    single duplicated name ``_SCREAMING_SNAKE_CASE`` (a SyntaxError) while the
    body read the real parameter names, and subclassed the undefined name
    ``__lowerCAmelCase`` even though ``PretrainedConfig`` is imported at the
    top of this file.  The signature below is reconstructed from the body
    reads and the original default values, in order.  A missing space in the
    conv-layer validation message was also fixed.
    """

    # Restored from the corrupted class attribute; PretrainedConfig keys
    # configurations by `model_type`.
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f" but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f" = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        """Number of input samples per output frame (product of conv strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 142 | 0 |
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def a__(state_dict):
    """Delete fairseq bookkeeping keys from `state_dict` in place.

    BUG FIX: the original bound the ignore list to a throwaway name while the
    loop read the undefined ``ignore_keys``, and the pop referenced the
    undefined name ``state_dict`` while the parameter was ``snake_case__``.
    Parameter renamed to match the body's reads; missing keys are skipped
    (`pop(k, None)`).
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def a__(emb):
    """Build a bias-free Linear layer sharing the weights of embedding `emb`.

    BUG FIX: the original bound the unpacked shape and the layer to throwaway
    names while the body read the undefined names ``emb`` and ``lin_layer``.
    The `.data` assignment deliberately overwrites the freshly initialized
    weight with the embedding matrix so both modules share storage.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def a__ ( snake_case__ ) -> List[Any]:
    """Load a fairseq M2M100-style checkpoint from disk and convert it to an HF model.

    NOTE(review): obfuscation collapsed every local to ``lowerCamelCase``, so the
    later reads of ``mam_aaa``, ``args``, ``state_dict`` and ``model``, and the
    calls to ``remove_ignore_keys_`` / ``make_linear_from_emb`` (not defined in
    this file under those names) all raise NameError as written. The structure
    mirrors transformers' fairseq->M2M100 conversion script; restore distinct
    local names before running. TODO confirm against the upstream script.
    """
    # Load checkpoint on CPU; config may live under "args" or "cfg.model".
    lowerCamelCase = torch.load(snake_case__ , map_location="""cpu""" )
    lowerCamelCase = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
    lowerCamelCase = mam_aaa["""model"""]
    remove_ignore_keys_(snake_case__ )
    # Vocab size is taken from the encoder embedding matrix.
    lowerCamelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    lowerCamelCase = MaMaaaConfig(
        vocab_size=snake_case__ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
    lowerCamelCase = state_dict["""decoder.embed_tokens.weight"""]
    lowerCamelCase = MaMaaaForConditionalGeneration(snake_case__ )
    model.model.load_state_dict(snake_case__ , strict=snake_case__ )
    lowerCamelCase = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    # CLI entry point for the fairseq->HF conversion defined above as `a__`.
    # Fixes: the parser/args results were bound to obfuscated names while the
    # following lines read `parser`/`args`/`model`; the attribute access used the
    # mojibake name `fairseq_pathß`; and the call targeted the undefined
    # `convert_fairseq_mamaaa_checkpoint_from_disk` instead of `a__`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = a__(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 291 |
"""simple docstring"""
from math import ceil
def a__(device_map, num_blocks) -> None:
    """Validate a model-parallel *device_map* against a model with *num_blocks* attention blocks.

    Raises ValueError when the map assigns a block twice, omits a block, or names a
    block the model does not have; returns None when the map is consistent.

    Fixes: the original def used the same obfuscated name for both parameters
    (a SyntaxError), the three length checks tested the wrong variable, and the
    ``Optional[int]`` annotation referenced an unimported typing name.
    """
    blocks = list(range(0, num_blocks))
    # Flatten the per-device block lists into one list of block indices.
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )
def a__(n_layers, devices) -> dict:
    """Partition *n_layers* consecutive layer indices evenly across *devices*.

    Returns ``{device: [layer indices]}`` with ``ceil(n_layers / len(devices))``
    layers per device (the last device may receive fewer).

    Fixes: the original def used one obfuscated name for both parameters
    (a SyntaxError) and an unimported ``List[Any]`` annotation.
    """
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
| 291 | 1 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Datasets (and config names) with preprocessed copies pinned on the HF GCP bucket.
# NOTE(review): the helper below reads this under the name `DATASETS_ON_HF_GCP`,
# but here it is bound to the obfuscated name `_lowerCamelCase` — rename before use.
_lowerCamelCase : Optional[Any] = [
    {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""},
    {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""},
    {"""dataset""": """snli""", """config_name""": """plain_text"""},
    {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""},
    {"""dataset""": """wiki40b""", """config_name""": """en"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""},
    {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""},
    {"""dataset""": """natural_questions""", """config_name""": """default"""},
]
def SCREAMING_SNAKE_CASE(with_config=True):
    """Build ``parameterized.named_parameters`` entries from ``DATASETS_ON_HF_GCP``.

    With *with_config* True: one entry per (dataset, config_name) pair; otherwise
    one entry per unique dataset name.

    Fixes: the parameter was obfuscated to ``lowercase_`` while both the body and
    the decorator call site use ``with_config`` (a TypeError on keyword call);
    the unimported ``List[Any]`` annotation is dropped.
    NOTE(review): ``DATASETS_ON_HF_GCP`` is bound under an obfuscated name in this
    file — confirm the constant is restored.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=UpperCAmelCase__ ) )
class UpperCamelCase_ ( UpperCAmelCase__ ):
    """Parameterized check that each pinned dataset exposes dataset_info.json on HF GCS.

    NOTE(review): obfuscation artifacts — the decorator reads the undefined module
    name ``UpperCAmelCase__``, both class attributes are bound to the same name,
    and every local in the test is assigned to ``A__`` while later lines read
    ``dataset_module``, ``builder_cls``, ``builder_instance`` — as written this
    fails with NameError. Restore distinct names before running.
    """

    # Upstream these are two distinct attributes (dataset, config_name).
    UpperCAmelCase__ = None
    UpperCAmelCase__ = None

    def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any]) ->List[Any]:
        """Fetch <HF_GCP_BASE_URL>/<relative_data_dir>/dataset_info.json into a temp dir."""
        with TemporaryDirectory() as tmp_dir:
            A__ = dataset_module_factory(UpperCAmelCase__ , cache_dir=UpperCAmelCase__)
            A__ = import_main_class(dataset_module.module_path , dataset=UpperCAmelCase__)
            A__ = builder_cls(
                cache_dir=UpperCAmelCase__ , config_name=UpperCAmelCase__ , hash=dataset_module.hash , )
            A__ = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=UpperCAmelCase__).replace(os.sep , '''/'''),
                    config.DATASET_INFO_FILENAME,
                ])
            A__ = cached_path(UpperCAmelCase__ , cache_dir=UpperCAmelCase__)
            self.assertTrue(os.path.exists(UpperCAmelCase__))
@pytest.mark.integration
def SCREAMING_SNAKE_CASE(tmp_path_factory):
    """Integration test: download_and_prepare the wikipedia ``20220301.frr`` config from HF GCS.

    Fixes: the parameter was obfuscated to ``lowercase_`` while the body reads the
    pytest fixture ``tmp_path_factory`` (so fixture injection failed), and every
    local was bound to ``A__`` while later lines read the intended names.
    """
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
    """Integration test: stream the wikipedia ``20220301.frr`` config as an IterableDataset.

    NOTE(review): for pytest to inject ``lowercase_`` it would have to be an
    existing fixture; upstream this parameter is a temp-dir fixture (e.g.
    ``tmp_path``) used as the cache directory — confirm before running.
    Locals are also collapsed to ``A__`` while later lines read
    ``dataset_module``, ``builder_cls``, ``builder_instance`` and ``ds``.
    """
    A__ = dataset_module_factory('''wikipedia''' , cache_dir=lowercase_ )
    A__ = import_main_class(dataset_module.module_path , dataset=lowercase_ )
    A__ = builder_cls(
        cache_dir=lowercase_ , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    A__ = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(lowercase_ , lowercase_ )
    assert "train" in ds
    assert isinstance(ds['''train'''] , lowercase_ )
    assert next(iter(ds['''train'''] ) )
| 231 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffold for the Nezha model (standard transformers `_LazyModule` pattern).
# NOTE(review): obfuscation bound every module constant to `_lowerCamelCase`, so the
# import-structure dict is overwritten by the model list, and `_import_structure`
# passed to _LazyModule below is never defined — as written this module raises
# NameError at import time. Restore `_import_structure` (dict, with the model list
# merged under a "modeling_nezha" key) before use.
_lowerCamelCase : Any = {
    """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}

# Only expose the torch modeling classes when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : str = [
        """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """NezhaForNextSentencePrediction""",
        """NezhaForMaskedLM""",
        """NezhaForPreTraining""",
        """NezhaForMultipleChoice""",
        """NezhaForQuestionAnswering""",
        """NezhaForSequenceClassification""",
        """NezhaForTokenClassification""",
        """NezhaModel""",
        """NezhaPreTrainedModel""",
    ]

# Static type checkers see real imports; at runtime the module is replaced by a
# lazy proxy that imports submodules on first attribute access.
if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    _lowerCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 231 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic torch ops so pipeline outputs are reproducible across runs.
enable_full_determinism()
class UpperCAmelCase_ (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast (tiny-model) tests for CycleDiffusionPipeline.

    NOTE(review): obfuscation artifacts throughout this class — every local is
    assigned to ``__lowerCamelCase`` while later lines read the intended names
    (``unet``, ``scheduler``, ``vae``, ``text_encoder``, ``tokenizer``,
    ``components``, ``image``, ``generator``, ``pipe``, ``images``,
    ``image_slice``, ``expected_slice``); one method def repeats the same
    parameter name twice (a SyntaxError); and several methods share the
    obfuscated name ``lowercase_`` so later defs shadow earlier ones. As written
    the class cannot run — restore distinct names before use.
    """

    lowerCamelCase : str = CycleDiffusionPipeline
    lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        'negative_prompt',
        'height',
        'width',
        'negative_prompt_embeds',
    }
    lowerCamelCase : Dict = PipelineTesterMixin.required_optional_params - {'latents'}
    lowerCamelCase : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'source_prompt'} )
    lowerCamelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    lowerCamelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS

    # Builds the tiny UNet / DDIM scheduler / VAE / CLIP components shared by the fast tests.
    def lowercase_ ( self ) -> Union[str, Any]:
        torch.manual_seed(0 )
        __lowerCamelCase : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
        __lowerCamelCase : Optional[Any] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , num_train_timesteps=10_00 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , )
        torch.manual_seed(0 )
        __lowerCamelCase : Tuple = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
        torch.manual_seed(0 )
        __lowerCamelCase : Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        __lowerCamelCase : Optional[int] = CLIPTextModel(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        __lowerCamelCase : Tuple = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    # Builds the standard call kwargs (image, prompts, generator) for a pipeline run.
    # NOTE(review): duplicate parameter names in this def are a SyntaxError as written.
    def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ) -> List[Any]:
        __lowerCamelCase : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[Any] = image / 2 + 0.5
        if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ):
            __lowerCamelCase : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
        else:
            __lowerCamelCase : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : str = {
            'prompt': 'An astronaut riding an elephant',
            'source_prompt': 'An astronaut riding a horse',
            'image': image,
            'generator': generator,
            'num_inference_steps': 2,
            'eta': 0.1,
            'strength': 0.8,
            'guidance_scale': 3,
            'source_guidance_scale': 1,
            'output_type': 'numpy',
        }
        return inputs

    # Full-precision CPU smoke test: checks output shape and a fixed image slice.
    def lowercase_ ( self ) -> Dict:
        __lowerCamelCase : Optional[Any] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[Any] = self.get_dummy_components()
        __lowerCamelCase : List[str] = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : str = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )

        __lowerCamelCase : List[str] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Optional[Any] = pipe(**SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[Any] = output.images

        __lowerCamelCase : Optional[Any] = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        __lowerCamelCase : List[str] = np.array([0.4_4_5_9, 0.4_9_4_3, 0.4_5_4_4, 0.6_6_4_3, 0.5_4_7_4, 0.4_3_2_7, 0.5_7_0_1, 0.5_9_5_9, 0.5_1_7_9] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    # Same smoke test with all components cast to fp16 (GPU only).
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def lowercase_ ( self ) -> Any:
        __lowerCamelCase : Dict = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(SCREAMING_SNAKE_CASE_ , 'half' ):
                __lowerCamelCase : Tuple = module.half()
        __lowerCamelCase : Tuple = CycleDiffusionPipeline(**SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : List[Any] = pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )

        __lowerCamelCase : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Optional[int] = pipe(**SCREAMING_SNAKE_CASE_ )
        __lowerCamelCase : Dict = output.images

        __lowerCamelCase : Dict = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        __lowerCamelCase : Dict = np.array([0.3_5_0_6, 0.4_5_4_3, 0.4_4_6, 0.4_5_7_5, 0.5_1_9_5, 0.4_1_5_5, 0.5_2_7_3, 0.5_1_8, 0.4_1_1_6] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    # The remaining methods defer to the shared PipelineTesterMixin implementations,
    # skipped where CycleDiffusion is non-deterministic or unsupported on MPS.
    @skip_mps
    def lowercase_ ( self ) -> Tuple:
        return super().test_save_load_local()

    @unittest.skip('non-deterministic pipeline' )
    def lowercase_ ( self ) -> Tuple:
        return super().test_inference_batch_single_identical()

    @skip_mps
    def lowercase_ ( self ) -> Any:
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def lowercase_ ( self ) -> List[Any]:
        return super().test_save_load_optional_components()

    @skip_mps
    def lowercase_ ( self ) -> List[Any]:
        return super().test_attention_slicing_forward_pass()
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class UpperCAmelCase_ (unittest.TestCase ):
    """Slow GPU integration tests for CycleDiffusionPipeline against reference images.

    NOTE(review): same obfuscation artifacts as the fast tests — locals assigned
    to ``__lowerCamelCase`` while later lines read ``init_image``,
    ``expected_image``, ``model_id``, ``scheduler``, ``pipe``, ``prompt``,
    ``source_prompt``, ``generator``, ``output`` and ``image``; and the method
    names collapse to ``lowercase_`` (the teardown body calls
    ``super().tearDown()``, so upstream that method is ``tearDown``). As written
    these tests raise NameError; restore distinct names before running.
    """

    # Upstream: tearDown — frees VRAM between tests.
    def lowercase_ ( self ) -> Dict:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # fp16 run compared against a reference output within a loose tolerance.
    def lowercase_ ( self ) -> Optional[Any]:
        __lowerCamelCase : Optional[int] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png' )
        __lowerCamelCase : Union[str, Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
        __lowerCamelCase : Tuple = init_image.resize((5_12, 5_12) )

        __lowerCamelCase : Optional[Any] = 'CompVis/stable-diffusion-v1-4'
        __lowerCamelCase : List[str] = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder='scheduler' )
        __lowerCamelCase : int = CycleDiffusionPipeline.from_pretrained(
            SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , revision='fp16' )

        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()

        __lowerCamelCase : List[Any] = 'A black colored car'
        __lowerCamelCase : Any = 'A blue colored car'

        __lowerCamelCase : Tuple = torch.manual_seed(0 )
        __lowerCamelCase : List[str] = pipe(
            prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type='np' , )
        __lowerCamelCase : List[Any] = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image ).max() < 5E-1

    # Full-precision run compared against its own reference output.
    def lowercase_ ( self ) -> int:
        __lowerCamelCase : Dict = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/cycle-diffusion/black_colored_car.png' )
        __lowerCamelCase : Optional[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
        __lowerCamelCase : Tuple = init_image.resize((5_12, 5_12) )

        __lowerCamelCase : Dict = 'CompVis/stable-diffusion-v1-4'
        __lowerCamelCase : Any = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE_ , subfolder='scheduler' )
        __lowerCamelCase : Optional[int] = CycleDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , safety_checker=SCREAMING_SNAKE_CASE_ )

        pipe.to(SCREAMING_SNAKE_CASE_ )
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
        pipe.enable_attention_slicing()

        __lowerCamelCase : Union[str, Any] = 'A black colored car'
        __lowerCamelCase : Union[str, Any] = 'A blue colored car'

        __lowerCamelCase : Any = torch.manual_seed(0 )
        __lowerCamelCase : int = pipe(
            prompt=SCREAMING_SNAKE_CASE_ , source_prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , eta=0.1 , strength=0.8_5 , guidance_scale=3 , source_guidance_scale=1 , generator=SCREAMING_SNAKE_CASE_ , output_type='np' , )
        __lowerCamelCase : Any = output.images

        assert np.abs(image - expected_image ).max() < 2E-2
| 185 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
a_ : str = logging.get_logger(__name__)

# Resource filenames and hub URLs for the RetriBERT fast tokenizer.
# NOTE(review): all five module constants are bound to the same obfuscated name
# `a_`, while the class below reads `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP`, `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and
# `PRETRAINED_INIT_CONFIGURATION` — as written those reads raise NameError.
a_ : Union[str, Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

a_ : List[str] = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

a_ : Any = {
    "yjernite/retribert-base-uncased": 5_1_2,
}

a_ : Tuple = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class a ( _SCREAMING_SNAKE_CASE ):
    """Fast (tokenizers-backed) tokenizer for RetriBERT; behaves like BertTokenizerFast.

    Fixes: the obfuscated original bound every class attribute to one name, gave
    ``__init__`` and two methods duplicate parameter names (a SyntaxError), and
    the method bodies read undefined names (``token_ids_a``/``sep``/``cls``).
    Attribute and parameter names are restored to the standard
    PreTrainedTokenizerFast contract.
    NOTE(review): the three methods still share one obfuscated name, so later
    defs shadow earlier ones — upstream they are
    ``build_inputs_with_special_tokens``, ``create_token_type_ids_from_sequences``
    and ``save_vocabulary``; restore those names before use.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        """Initialize from a vocab/tokenizer file and BERT-style special tokens."""
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Re-sync the backend normalizer when the saved options differ from the
        # requested ones (lowercasing / accent stripping / Chinese-char handling).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __UpperCAmelCase(self, token_ids_a, token_ids_b=None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def __UpperCAmelCase(self, token_ids_a, token_ids_b=None) -> List[int]:
        """Token-type ids: 0 for sequence A (incl. CLS/SEP), 1 for sequence B."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def __UpperCAmelCase(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Save the backend vocabulary files; returns the written file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 168 | 0 |
"""simple docstring"""
import re
def _lowerCamelCase(_UpperCamelCase):
    """Split *_UpperCamelCase* on punctuation, then each chunk on whitespace.

    Returns a list of word lists, one per punctuation-delimited chunk.
    Fixes: the body passed the undefined name ``str_`` to ``re.split`` instead
    of the function's parameter (a NameError at runtime).
    """
    return [chunk.split() for chunk in re.split(r"[^ a-z A-Z 0-9 \s]", _UpperCamelCase)]
def _lowerCamelCase(_UpperCamelCase):
    """Convert *_UpperCamelCase* to "simple case" (PascalCase, no separators).

    Splits on punctuation and whitespace, capitalizes every word, joins them.
    Fixes: the body called the undefined ``split_input`` with the undefined
    ``str_`` — the splitting is inlined so the function is self-contained.
    """
    string_split = [chunk.split() for chunk in re.split(r"[^ a-z A-Z 0-9 \s]", _UpperCamelCase)]
    return "".join(
        ["".join([word.capitalize() for word in sub_str]) for sub_str in string_split]
    )
def _lowerCamelCase(_UpperCamelCase, upper, separator):
    """Join the words of *_UpperCamelCase* with *separator*, upper- or lower-cased.

    Returns the literal string "not valid string" on IndexError (original contract).
    Fixes: the original def repeated one obfuscated name for all three parameters
    (a SyntaxError), and the body called the undefined ``split_input`` — the
    parameter names now match the body's ``upper``/``separator`` reads and the
    splitting is inlined.
    """
    try:
        string_split = [chunk.split() for chunk in re.split(r"[^ a-z A-Z 0-9 \s]", _UpperCamelCase)]
        if upper:
            res_str = "".join(
                [separator.join([word.upper() for word in sub_str]) for sub_str in string_split]
            )
        else:
            res_str = "".join(
                [separator.join([word.lower() for word in sub_str]) for sub_str in string_split]
            )
        return res_str
    except IndexError:
        return "not valid string"
def _lowerCamelCase(_UpperCamelCase):
    """PascalCase conversion (alias of the simple-case conversion).

    Fixes: the body called ``to_simple_case``, which is not defined in this file
    (the helpers carry obfuscated names) — the logic is inlined instead.
    """
    parts = [chunk.split() for chunk in re.split(r"[^ a-z A-Z 0-9 \s]", _UpperCamelCase)]
    return "".join(["".join([word.capitalize() for word in sub_str]) for sub_str in parts])
def _lowerCamelCase(_UpperCamelCase):
    """camelCase conversion: PascalCase with the first character lower-cased.

    Returns "not valid string" when the result is empty (IndexError on [0]).
    Fixes: the body called the undefined ``to_simple_case`` — the logic is inlined.
    """
    try:
        parts = [chunk.split() for chunk in re.split(r"[^ a-z A-Z 0-9 \s]", _UpperCamelCase)]
        res_str = "".join(["".join([word.capitalize() for word in sub_str]) for sub_str in parts])
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"
def _lowerCamelCase(_UpperCamelCase, upper):
    """snake_case (*upper* False) or SNAKE_CASE (*upper* True) conversion.

    Fixes: the original def repeated one obfuscated name for both parameters
    (a SyntaxError) and delegated to the undefined ``to_complex_case`` — the
    complex-case logic is inlined with separator "_".
    """
    try:
        parts = [chunk.split() for chunk in re.split(r"[^ a-z A-Z 0-9 \s]", _UpperCamelCase)]
        if upper:
            return "".join(["_".join([word.upper() for word in sub_str]) for sub_str in parts])
        return "".join(["_".join([word.lower() for word in sub_str]) for sub_str in parts])
    except IndexError:
        return "not valid string"
def _lowerCamelCase(_UpperCamelCase, upper):
    """kebab-case (*upper* False) or KEBAB-CASE (*upper* True) conversion.

    Fixes: the original def repeated one obfuscated name for both parameters
    (a SyntaxError) and delegated to the undefined ``to_complex_case`` — the
    complex-case logic is inlined with separator "-".
    """
    try:
        parts = [chunk.split() for chunk in re.split(r"[^ a-z A-Z 0-9 \s]", _UpperCamelCase)]
        if upper:
            return "".join(["-".join([word.upper() for word in sub_str]) for sub_str in parts])
        return "".join(["-".join([word.lower() for word in sub_str]) for sub_str in parts])
    except IndexError:
        return "not valid string"
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    __import__("doctest").testmod()
| 357 |
"""simple docstring"""
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
A : Tuple = logging.get_logger(__name__)

# Base URL of the public OpenAI Jukebox checkpoint bucket.
A : Union[str, Any] = "https://openaipublic.azureedge.net/jukebox/models/"
# Checkpoint files required per model variant: shared VQ-VAE + three priors.
# NOTE(review): all three constants are bound to the same obfuscated name `A`,
# while the conversion code reads `logger`, `PREFIX` and `MODEL_MAPPING` —
# as written the last assignment wins and those reads raise NameError.
A : Tuple = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def _lowerCamelCase(key):
    """Map one OpenAI Jukebox parameter name to its HF JukeboxModel equivalent.

    Applies the token-level renames (conv1d, codebook, layer_norm, encoder, ...)
    after the structural regex renames have run; returns the input unchanged when
    no rule matches.

    Fixes: the body read/wrote ``key`` throughout while the parameter carried an
    obfuscated name (a NameError), and the in-place rebinding writes were
    collapsed to a throwaway name so the chained renames never took effect.
    """
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def _lowerCamelCase(state_dict, model_state_dict, key_prefix, mapping):
    """Rename raw Jukebox checkpoint keys in *state_dict* to HF JukeboxModel names.

    Structural renames (encoder/decoder/conditioner blocks) are done with the
    regexes below, then token-level renames via the nested ``_replace_key``.
    Keys whose target is missing from *model_state_dict* (under *key_prefix*) or
    whose shapes mismatch are reported and kept under their original name.
    Records new->old names in *mapping* and returns the renamed dict.

    Fixes: the original def repeated one obfuscated name for all four parameters
    (a SyntaxError) while the body reads ``state_dict``/``model_state_dict``/
    ``key_prefix``/``mapping``; the collapsed local writes are restored from the
    names the body reads; the token-level helper (name-mangled by obfuscation at
    module level) is inlined as a nested function so this block is self-contained.
    """

    def _replace_key(key):
        """Token-level renames applied after the structural regex renames."""
        if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
            key = key.replace(".model.1.bias", ".conv1d_1.bias")
        elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
            key = key.replace(".model.1.weight", ".conv1d_1.weight")
        elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
            key = key.replace(".model.3.bias", ".conv1d_2.bias")
        elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
            key = key.replace(".model.3.weight", ".conv1d_2.weight")
        if "conditioner_blocks.0." in key:
            key = key.replace("conditioner_blocks.0", "conditioner_blocks")
        if "prime_prior" in key:
            key = key.replace("prime_prior", "encoder")
        if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
            key = key.replace(".emb.", ".")
        if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
            return key.replace(".k", ".codebook")
        if "y_emb." in key:
            return key.replace("y_emb.", "metadata_embedding.")
        if "x_emb.emb." in key:
            key = key.replace("0.x_emb.emb", "embed_tokens")
        if "prime_state_ln" in key:
            return key.replace("prime_state_ln", "encoder.final_layer_norm")
        if ".ln" in key:
            return key.replace(".ln", ".layer_norm")
        if "_ln" in key:
            return key.replace("_ln", "_layer_norm")
        if "prime_state_proj" in key:
            return key.replace("prime_state_proj", "encoder.proj_in")
        if "prime_x_out" in key:
            return key.replace("prime_x_out", "encoder.lm_head")
        if "prior.x_out" in key:
            return key.replace("x_out", "fc_proj_out")
        if "x_emb" in key:
            return key.replace("x_emb", "embed_tokens")
        return key

    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = _replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def _lowerCamelCase(model_name=None, pytorch_dump_folder_path=None):
    """Download the OpenAI Jukebox checkpoints for *model_name*, convert them to an
    HF ``JukeboxModel``, and save model + key mapping to *pytorch_dump_folder_path*.

    Returns the (post-vqvae) list of converted prior state dicts.

    Fixes: the original def repeated one obfuscated name for both default
    parameters (a SyntaxError) while the body reads ``model_name`` and
    ``pytorch_dump_folder_path``; collapsed local writes are restored from the
    names the following lines read; boolean kwargs that were replaced by the
    obfuscated parameter name are restored to True (allow_redirects, exist_ok).
    """
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # Expand fairseq-style ".b"/".w" suffixes; fold conditioner blocks for priors 0/1.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        # NOTE(review): the key-fixing helper is defined above under an obfuscated
        # name; `fix_jukebox_keys` is its upstream name — confirm the binding.
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    # CLI entry point for the Jukebox conversion defined above as `_lowerCamelCase`.
    # Fixes: the parser/args results were bound to obfuscated names while the
    # following lines read `parser`/`args`, and the call targeted the undefined
    # `convert_openai_checkpoint` instead of the converter defined in this module.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    _lowerCamelCase(args.model_name, args.pytorch_dump_folder_path)
| 259 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
# Module-level logger; restored to the name `logger` that the rest of the file
# actually reads (e.g. `logger.info(...)` inside convert_examples_to_features).
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification.

    Field names restored to match downstream access (`example.guid`,
    `example.words`, `example.labels`); the mangled version collapsed all
    three fields into one `_a` attribute.
    """

    guid: str  # unique identifier for the example
    words: List[str]  # the words of the sequence
    labels: Optional[List[str]]  # per-word labels; None for test examples
@dataclass
class InputFeatures:
    """Model-ready features for one example.

    Names restored to match the keyword construction
    `InputFeatures(input_ids=..., attention_mask=..., token_type_ids=..., label_ids=...)`
    used by convert_examples_to_features and the attribute reads in the TF dataset.
    """

    input_ids: List[int]  # token ids, padded to max_seq_length
    attention_mask: List[int]  # 1 for real tokens, 0 for padding
    token_type_ids: Optional[List[int]] = None  # segment ids; None when the tokenizer has none
    label_ids: Optional[List[int]] = None  # per-token label ids (pad id on special/pad tokens)
class Split(Enum):
    """Dataset split names.

    Restored as a string-valued Enum: the file imports `Enum`, defaults a
    parameter to `Split.train`, and reads `mode.value`; the mangled version
    collapsed all three members into one `_a` attribute.
    """

    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    """Base class for token-classification tasks (e.g. NER).

    Subclasses implement reading examples and label lists; the shared
    `convert_examples_to_features` turns `InputExample`s into padded
    `InputFeatures`. Method names restored from the call sites
    (`read_examples_from_file`, `convert_examples_to_features`); in the mangled
    version all three staticmethods shared one name, so only the last survived.
    """

    @staticmethod
    def read_examples_from_file(data_dir, mode):
        """Read the examples of the given split from ``data_dir`` (task specific)."""
        raise NotImplementedError

    @staticmethod
    def get_labels(path):
        """Return the list of labels for the task (task specific)."""
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples,
        label_list,
        max_seq_length,
        tokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ):
        """Tokenize ``examples`` and build padded, model-ready ``InputFeatures``.

        Only the first sub-token of each word keeps its real label id; the rest
        get ``pad_token_label_id`` so they are ignored by the loss.
        """
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when
                # calling tokenize with just a space — skip such words entirely.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Real label id on the first sub-token, padding ids on the rest.
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS]/[SEP] (and the extra separator for RoBERTa-like models).
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # Append [SEP]; the segment ids mark everything so far as sequence A.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
            # [CLS] goes at the end for XLNet-style models, at the front otherwise.
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens (inverted
            # when mask_padding_with_zero is False).
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            # Zero-pad up to the sequence length, on the requested side.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        """PyTorch dataset of `InputFeatures`, cached on disk per (split, tokenizer, max_seq_length).

        Restored from the mangled version, whose `__init__` had duplicate
        parameter names (a SyntaxError) and whose locals were collapsed.
        """

        features: List[InputFeatures]
        # Use the cross-entropy ignore_index as the padding label id so that
        # only real label ids contribute to the loss later.
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

        def __init__(
            self,
            token_classification_task,
            data_dir,
            tokenizer,
            labels,
            model_type,
            max_seq_length=None,
            overwrite_cache=False,
            mode=Split.train,
        ):
            # Load data features from cache or build them from the dataset file.
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        # NOTE(review): the extra-separator flag was mangled here;
                        # upstream passes False — TODO confirm.
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        """TensorFlow dataset of `InputFeatures`, exposed as a generator-backed `tf.data.Dataset`.

        Restored from the mangled version (duplicate-parameter `__init__`,
        collapsed locals, unresolvable `tf.intaa` dtypes).
        """

        features: List[InputFeatures]
        pad_token_label_id: int = -100  # label id ignored by the loss

        def __init__(
            self,
            token_classification_task,
            data_dir,
            tokenizer,
            labels,
            model_type,
            max_seq_length=None,
            overwrite_cache=False,
            mode=Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                # NOTE(review): mangled in the source; upstream passes False — TODO confirm.
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                # Yield (inputs, labels) pairs; drop token_type_ids when absent.
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            # NOTE(review): dtypes were mangled to `tf.intaa`; int32 inputs /
            # int64 labels follow the upstream implementation — TODO confirm.
            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            """Return the dataset with its cardinality asserted for Keras progress bars."""
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]
| 92 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ):
__lowerCAmelCase = 0
if start < end:
__lowerCAmelCase = randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = a[end]
__lowerCAmelCase = a[pivot]
__lowerCAmelCase = temp
__lowerCAmelCase , __lowerCAmelCase = _in_place_partition(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , p - 1 )
count += _in_place_quick_sort(SCREAMING_SNAKE_CASE_ , p + 1 , SCREAMING_SNAKE_CASE_ )
return count
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
__lowerCAmelCase = 0
__lowerCAmelCase = randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = a[end]
__lowerCAmelCase = a[pivot]
__lowerCAmelCase = temp
__lowerCAmelCase = start - 1
for index in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
__lowerCAmelCase = new_pivot_index + 1
__lowerCAmelCase = a[new_pivot_index]
__lowerCAmelCase = a[index]
__lowerCAmelCase = temp
__lowerCAmelCase = a[new_pivot_index + 1]
__lowerCAmelCase = a[end]
__lowerCAmelCase = temp
return new_pivot_index + 1, count
# Demo: sort 100 samples from a standard normal distribution, round-tripping
# the array through a temporary .npy file. Variable names restored; the mangled
# version reassigned one name repeatedly and then read undefined `mu`/`M`/`r`.
outfile = TemporaryFile()
p = 100  # number of elements to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0)  # rewind so np.load reads the same array back
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
| 92 | 1 |
def lowerCAmelCase_(num1: int, num2: int) -> bool:
    """Return True iff ``num1`` and ``num2`` have opposite signs.

    The sign bit of ``num1 ^ num2`` is set exactly when the operands' signs
    differ. Fix: the original declared both parameters as ``_lowercase``
    (a duplicate-argument SyntaxError) and XORed one operand with itself.
    """
    return num1 ^ num2 < 0  # ^ binds tighter than <, so this is (num1 ^ num2) < 0
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 266 |
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
# GitHub raw-file URL template for community pipelines; restored to the name
# read by get_cached_module_file below (the mangled version assigned both this
# and the logger to the same `_lowercase` name).
COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    """Return all released `diffusers` versions from PyPI, oldest to newest.

    Performs network I/O. Name restored from the call site in
    get_cached_module_file; in the mangled file every helper here shared the
    name ``lowerCAmelCase_``, so each def shadowed the previous one.
    """
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda ver: version.Version(ver))
def init_hf_modules():
    """Create HF_MODULES_CACHE, add it to sys.path, and make it a package.

    Restored internals: the mangled body passed an undefined ``_lowercase``
    where the cache path and ``exist_ok=True`` belong.
    """
    # This function has already been executed if HF_MODULES_CACHE is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    """Create the package directory for ``name`` under HF_MODULES_CACHE.

    Recursively creates missing parent packages so imports resolve.
    """
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    """Return the module names relatively imported by the file at ``module_file``.

    Matches both ``import .xxx`` and ``from .xxx import yyy`` forms and
    de-duplicates the result (order is unspecified).
    """
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    """Transitively collect the ``.py`` files relatively imported by ``module_file``.

    Iterates until a pass discovers no new files; paths are resolved relative
    to ``module_file``'s directory.
    """
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        # Drop files we have already recorded to guarantee termination on cycles.
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    """Verify every absolute import in ``filename`` is installed; return its relative imports.

    Raises ImportError listing all missing packages at once so the user can
    install them in a single ``pip install`` call.
    """
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module of absolute imports.
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    """Import ``module_path`` (a path-like dotted target) and return ``class_name`` from it.

    When ``class_name`` is None, auto-detect the single pipeline class instead.
    """
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    """Return the unique DiffusionPipeline subclass defined in ``loaded_module``.

    Classes coming from the ``diffusers`` package itself (re-exports) are
    ignored; more than one candidate is an error.
    """
    # Local import to avoid a circular dependency with the pipelines package.
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls
    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    """Locate ``module_file`` (local path, community pipeline on GitHub, or Hub repo),
    cache it under the dynamic-modules directory together with its relative
    imports, and return the path (relative to HF_MODULES_CACHE) of the cached file.

    Restored from the mangled version, whose signature declared every parameter
    as ``_lowercase`` (a SyntaxError) and whose locals were collapsed.
    """
    # Download and cache module_file from the repo `pretrained_model_name_or_path` of
    # grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        # No "/": this is the bare name of a community pipeline hosted on GitHub.
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])
        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            pass  # use the main branch as-is
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )
        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                # Public GitHub raw files need no auth token.
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)
    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha
        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)
        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports.
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Download/cache ``module_file`` and return ``class_name`` from it.

    Thin convenience wrapper around get_cached_module_file + get_class_in_module;
    the mangled signature declared every parameter as ``_lowercase`` (SyntaxError).
    """
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
| 266 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Test-fixture sizes; restored to the names read by `main` below
# (the mangled version assigned both constants to the same name).
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError(RuntimeError):
    """Raised when the distributed split-by-node check yields the wrong shard size.

    Name restored from the ``raise FailedTestError(...)`` call site below; the
    mangled base ``a__`` is undefined, RuntimeError matches upstream.
    """

    pass
def gen(shards):
    """Yield NUM_ITEMS_PER_SHARD dict items for each shard name in ``shards``.

    Restored: the mangled version iterated ``range`` over its own argument;
    per upstream the inner range is NUM_ITEMS_PER_SHARD.
    """
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}
def main():
    """Check that split_dataset_by_node gives this rank its expected share of items.

    Reads RANK/WORLD_SIZE from the environment, builds the shard dataset
    (streaming or materialized), splits it for this node, and raises
    FailedTestError if the local item count is wrong. Locals restored; the
    mangled version bound every value to one name and read undefined ones.
    """
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # Items are distributed as evenly as possible; the first `full_size % world_size`
    # ranks get one extra item.
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
    # Entry point when launched (e.g. via torch.distributed) as a script.
    main()
| 88 | '''simple docstring'''
def factorial(num):
    """Return ``num!`` computed iteratively.

    Name and locals restored: the mangled version assigned to a junk name
    while the loop read undefined ``num``/``fact``.
    """
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact
def split_and_add(number):
    """Return the sum of the decimal digits of ``number`` (non-negative int)."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # drop the last digit
    return sum_of_digits
def solution(num=100):
    """Project Euler 20: sum of the digits of ``num!`` (default 100).

    Restored: the three functions of this snippet all shared one mangled name,
    so the helpers called here were shadowed away.
    """
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
    # Read an integer from stdin and print the digit sum of its factorial.
    print(solution(int(input("""Enter the Number: """).strip())))
| 198 | 0 |
from ..utils import DummyObject, requires_backends
class __lowercase (metaclass=UpperCamelCase__ ):
    """Auto-generated placeholder that raises an informative ImportError via
    ``requires_backends`` unless the `torch`, `transformers` and `onnx`
    backends are installed."""
    _snake_case = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *A , **A ) -> List[str]:
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    # NOTE(review): both classmethods share the mangled name `UpperCAmelCase`, so
    # the second shadows the first (presumably distinct `from_config` /
    # `from_pretrained` hooks upstream — TODO confirm).
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> int:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> List[str]:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
    """Auto-generated placeholder raising ImportError via ``requires_backends``
    unless `torch`, `transformers` and `onnx` are installed."""
    _snake_case = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *A , **A ) -> List[str]:
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    # NOTE(review): the second classmethod shadows the first (mangled shared name).
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> Dict:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> int:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
    """Auto-generated placeholder raising ImportError via ``requires_backends``
    unless `torch`, `transformers` and `onnx` are installed."""
    _snake_case = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *A , **A ) -> int:
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    # NOTE(review): the second classmethod shadows the first (mangled shared name).
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> List[Any]:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> Tuple:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
    """Auto-generated placeholder raising ImportError via ``requires_backends``
    unless `torch`, `transformers` and `onnx` are installed."""
    _snake_case = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *A , **A ) -> Any:
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    # NOTE(review): the second classmethod shadows the first (mangled shared name).
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> Any:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
    """Auto-generated placeholder raising ImportError via ``requires_backends``
    unless `torch`, `transformers` and `onnx` are installed."""
    _snake_case = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *A , **A ) -> Tuple:
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    # NOTE(review): the second classmethod shadows the first (mangled shared name).
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> Any:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> str:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __lowercase (metaclass=UpperCamelCase__ ):
    """Auto-generated placeholder raising ImportError via ``requires_backends``
    unless `torch`, `transformers` and `onnx` are installed."""
    _snake_case = ["""torch""", """transformers""", """onnx"""]
    def __init__( self , *A , **A ) -> List[str]:
        requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
    # NOTE(review): the second classmethod shadows the first (mangled shared name).
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> Any:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
    @classmethod
    def UpperCAmelCase ( cls , *A , **A ) -> Union[str, Any]:
        requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
| 176 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()  # pin RNG/algorithm choices so pixel-level slice asserts below are reproducible
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ConsistencyModelPipeline using tiny test checkpoints.

    Restored from the mangled version: the mixin attributes and every local
    were collapsed into shared names (`_snake_case`, `snake_case`), leaving the
    tests unrunnable.
    """

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        """Tiny unconditional UNet test checkpoint."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        """Tiny class-conditional UNet test checkpoint."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Build the pipeline components dict (unet + scheduler)."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline call kwargs for the given device/seed."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        # Single-step sampling: explicit step count, no custom timestep schedule.
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class __lowercase (unittest.TestCase ):
    """Slow GPU integration tests for the consistency-model pipeline using the
    pretrained ``diffusers/consistency_models`` ImageNet-64 checkpoint with a
    40-step ``CMStochasticIterativeScheduler``.

    NOTE(review): identifiers look machine-renamed. All methods share the name
    ``UpperCAmelCase`` (only the last definition of each signature survives),
    every assignment targets ``snake_case`` while later lines read ``pipe``,
    ``inputs``, ``latents``, ``image`` etc. (unbound), ``A`` is undefined, and
    several ``def`` lines repeat the parameter name ``A`` -- duplicate argument
    names are a SyntaxError. ``@require_torch_a`` is presumably a mangled
    torch-2.x requirement decorator. TODO: restore the original identifiers.
    """

    def UpperCAmelCase ( self ) -> Dict:
        # tearDown-style cleanup: free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCAmelCase ( self , A=0 , A=False , A="cpu" , A=torch.floataa , A=(1, 3, 6_4, 6_4) ) -> int:
        # Build the common pipeline kwargs (explicit two-step [22, 0] schedule);
        # optionally pin fixed latents for bitwise reproducibility.
        # NOTE(review): all five parameters are named ``A`` (SyntaxError);
        # presumably seed / get_fixed_latents / device / dtype / shape.
        snake_case : Union[str, Any] = torch.manual_seed(A )
        snake_case : Tuple = {
            """num_inference_steps""": None,
            """timesteps""": [2_2, 0],
            """class_labels""": 0,
            """generator""": generator,
            """output_type""": """np""",
        }
        if get_fixed_latents:
            snake_case : Optional[int] = self.get_fixed_latents(seed=A , device=A , dtype=A , shape=A )
            snake_case : int = latents
        return inputs

    def UpperCAmelCase ( self , A=0 , A="cpu" , A=torch.floataa , A=(1, 3, 6_4, 6_4) ) -> Any:
        # Deterministic latents from a seeded generator on the requested device.
        if type(A ) == str:
            snake_case : List[str] = torch.device(A )
        snake_case : Any = torch.Generator(device=A ).manual_seed(A )
        snake_case : Dict = randn_tensor(A , generator=A , device=A , dtype=A )
        return latents

    def UpperCAmelCase ( self ) -> Dict:
        # Multistep (default [22, 0]) generation against an expected slice.
        snake_case : List[Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        snake_case : Optional[int] = CMStochasticIterativeScheduler(
            num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
        snake_case : Any = ConsistencyModelPipeline(unet=A , scheduler=A )
        pipe.to(torch_device=A )
        pipe.set_progress_bar_config(disable=A )
        snake_case : Tuple = self.get_inputs()
        snake_case : List[str] = pipe(**A ).images
        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case : Optional[Any] = image[0, -3:, -3:, -1]
        snake_case : Union[str, Any] = np.array([0.08_88, 0.08_81, 0.06_66, 0.04_79, 0.02_92, 0.01_95, 0.02_01, 0.01_63, 0.02_54] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def UpperCAmelCase ( self ) -> Tuple:
        # Single-step (num_inference_steps=1) generation against its own slice.
        snake_case : Tuple = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        snake_case : int = CMStochasticIterativeScheduler(
            num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
        snake_case : List[str] = ConsistencyModelPipeline(unet=A , scheduler=A )
        pipe.to(torch_device=A )
        pipe.set_progress_bar_config(disable=A )
        snake_case : Union[str, Any] = self.get_inputs()
        snake_case : Tuple = 1
        snake_case : List[str] = None
        snake_case : Any = pipe(**A ).images
        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case : List[Any] = image[0, -3:, -3:, -1]
        snake_case : Union[str, Any] = np.array([0.03_40, 0.01_52, 0.00_63, 0.02_67, 0.02_21, 0.01_07, 0.04_16, 0.01_86, 0.02_17] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def UpperCAmelCase ( self ) -> Optional[int]:
        # Multistep fp16 run with fixed latents under the flash-attention
        # (scaled-dot-product) kernel context; tighter tolerance.
        snake_case : List[Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        snake_case : Optional[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
        snake_case : List[Any] = ConsistencyModelPipeline(unet=A , scheduler=A )
        pipe.to(torch_device=A , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=A )
        snake_case : List[str] = self.get_inputs(get_fixed_latents=A , device=A )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=A , enable_math=A , enable_mem_efficient=A ):
            snake_case : Tuple = pipe(**A ).images
        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case : Union[str, Any] = image[0, -3:, -3:, -1]
        snake_case : int = np.array([0.18_75, 0.14_28, 0.12_89, 0.21_51, 0.20_92, 0.14_77, 0.18_77, 0.16_41, 0.13_53] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def UpperCAmelCase ( self ) -> Optional[Any]:
        # Single-step fp16 run with fixed latents under the flash-attention
        # kernel context.
        snake_case : Optional[Any] = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
        snake_case : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=4_0 , sigma_min=0.0_02 , sigma_max=80.0 , )
        snake_case : Dict = ConsistencyModelPipeline(unet=A , scheduler=A )
        pipe.to(torch_device=A , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=A )
        snake_case : Optional[int] = self.get_inputs(get_fixed_latents=A , device=A )
        snake_case : Union[str, Any] = 1
        snake_case : Dict = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=A , enable_math=A , enable_mem_efficient=A ):
            snake_case : List[Any] = pipe(**A ).images
        assert image.shape == (1, 6_4, 6_4, 3)
        snake_case : List[Any] = image[0, -3:, -3:, -1]
        snake_case : Dict = np.array([0.16_63, 0.19_48, 0.22_75, 0.16_80, 0.12_04, 0.12_45, 0.18_58, 0.13_38, 0.20_95] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 176 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_:
    """Builds tiny MaskFormer configurations and random inputs, and runs
    shape/consistency checks for the model tests in this module.

    NOTE(review): identifiers look machine-renamed. All public methods share
    the name ``UpperCAmelCase_`` (earlier definitions are shadowed),
    ``__init__`` declares every parameter as ``__UpperCAmelCase`` (duplicate
    argument names are a SyntaxError), local assignments target
    ``lowerCAmelCase__`` while later lines read the intended original names
    (``parent``, ``pixel_values``, ``config``, ...), and annotated
    tuple-unpacking lines (``a , b : T = ...``) are themselves SyntaxErrors.
    The test class below instantiates ``MaskFormerModelTester``, presumably
    this class's original name -- TODO restore it.
    """

    def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=2 ,__UpperCAmelCase=True ,__UpperCAmelCase=False ,__UpperCAmelCase=10 ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 * 4 ,__UpperCAmelCase=32 * 6 ,__UpperCAmelCase=4 ,__UpperCAmelCase=32 ,) -> str:
        # Intended fields (read back below): parent, batch_size, is_training,
        # use_auxiliary_loss, num_queries, num_channels, min_size, max_size,
        # num_labels, mask_feature_size.
        lowerCAmelCase__ : Optional[int] = parent
        lowerCAmelCase__ : Optional[int] = batch_size
        lowerCAmelCase__ : Optional[int] = is_training
        lowerCAmelCase__ : Dict = use_auxiliary_loss
        lowerCAmelCase__ : Union[str, Any] = num_queries
        lowerCAmelCase__ : str = num_channels
        lowerCAmelCase__ : List[str] = min_size
        lowerCAmelCase__ : int = max_size
        lowerCAmelCase__ : Optional[Any] = num_labels
        lowerCAmelCase__ : List[Any] = mask_feature_size

    def UpperCAmelCase_ ( self ) -> Tuple:
        # prepare_config_and_inputs: random pixel values, an all-ones pixel
        # mask, binary {0,1} mask labels and random class labels, plus config.
        lowerCAmelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __UpperCAmelCase )
        lowerCAmelCase__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=__UpperCAmelCase )
        lowerCAmelCase__ : Any = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=__UpperCAmelCase ) > 0.5
        ).float()
        lowerCAmelCase__ : Optional[int] = (torch.rand((self.batch_size, self.num_labels) ,device=__UpperCAmelCase ) > 0.5).long()
        lowerCAmelCase__ : Any = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def UpperCAmelCase_ ( self ) -> Dict:
        # get_config: a tiny Swin backbone plus a tiny DETR decoder.
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1] ,) ,decoder_config=DetrConfig(
                decoder_ffn_dim=128 ,num_queries=self.num_queries ,decoder_attention_heads=2 ,d_model=self.mask_feature_size ,) ,mask_feature_size=self.mask_feature_size ,fpn_feature_size=self.mask_feature_size ,num_channels=self.num_channels ,num_labels=self.num_labels ,)

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # prepare_config_and_inputs_for_common: config plus a kwargs dict.
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
        lowerCAmelCase__ : List[str] = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
        return config, inputs_dict

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
        # check_output_hidden_state: hidden-state counts must match the config
        # (backbone depth for encoder/pixel decoder, decoder layer count for
        # the transformer decoder).
        lowerCAmelCase__ : Optional[int] = output.encoder_hidden_states
        lowerCAmelCase__ : Optional[int] = output.pixel_decoder_hidden_states
        lowerCAmelCase__ : Dict = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__UpperCAmelCase ) ,len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__UpperCAmelCase ) ,config.decoder_config.decoder_layers )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]:
        # create_and_check_maskformer_model: run MaskFormerModel in eval mode
        # and validate the decoder output shape and hidden-state presence.
        with torch.no_grad():
            lowerCAmelCase__ : int = MaskFormerModel(config=__UpperCAmelCase )
            model.to(__UpperCAmelCase )
            model.eval()
            lowerCAmelCase__ : str = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase )
            lowerCAmelCase__ : int = model(__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.mask_feature_size) ,)
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__UpperCAmelCase ,__UpperCAmelCase )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Optional[int]:
        # create_and_check_maskformer_instance_segmentation_head_model: check
        # logits shapes with and without labels, and that a scalar loss exists.
        lowerCAmelCase__ : Dict = MaskFormerForInstanceSegmentation(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()

        def comm_check_on_output(__UpperCAmelCase ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            lowerCAmelCase__ : List[Any] = model(pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase )
            lowerCAmelCase__ : Dict = model(__UpperCAmelCase )
            comm_check_on_output(__UpperCAmelCase )
            lowerCAmelCase__ : Optional[int] = model(
                pixel_values=__UpperCAmelCase ,pixel_mask=__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase )
            comm_check_on_output(__UpperCAmelCase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Model tests for MaskFormerModel / MaskFormerForInstanceSegmentation.

    NOTE(review): machine-renaming has broken this class. Both base classes
    are the undefined name ``SCREAMING_SNAKE_CASE_`` (the imports suggest they
    were ``ModelTesterMixin`` and ``PipelineTesterMixin``), every class
    attribute is named ``__lowercase`` so each assignment overwrites the
    previous, all test methods are named ``UpperCAmelCase_`` so only the last
    per-signature definition survives, ``MaskFormerModelTester`` is referenced
    but not defined under that name here, and annotated tuple-unpacking lines
    (``a , b : T = ...``) are SyntaxErrors. TODO restore original identifiers.
    """

    # Intended attributes: all_model_classes, the pipeline model mapping, and
    # four boolean test flags (original names not recoverable from this view).
    __lowercase : Optional[Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    __lowercase : int = (
        {'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    __lowercase : Union[str, Any] = False
    __lowercase : Dict = False
    __lowercase : Tuple = False
    __lowercase : List[Any] = False

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # setUp: build the model tester and a ConfigTester.
        lowerCAmelCase__ : str = MaskFormerModelTester(self )
        lowerCAmelCase__ : List[Any] = ConfigTester(self ,config_class=__UpperCAmelCase ,has_text_modality=__UpperCAmelCase )

    def UpperCAmelCase_ ( self ) -> List[str]:
        # Standard config round-trip tests.
        self.config_tester.run_common_tests()

    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Base-model forward with hidden states checked.
        lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # Instance-segmentation head shape/loss checks.
        lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__UpperCAmelCase )

    @unittest.skip(reason="""MaskFormer does not use inputs_embeds""" )
    def UpperCAmelCase_ ( self ) -> List[Any]:
        pass

    @unittest.skip(reason="""MaskFormer does not have a get_input_embeddings method""" )
    def UpperCAmelCase_ ( self ) -> str:
        pass

    @unittest.skip(reason="""MaskFormer is not a generative model""" )
    def UpperCAmelCase_ ( self ) -> Any:
        pass

    @unittest.skip(reason="""MaskFormer does not use token embeddings""" )
    def UpperCAmelCase_ ( self ) -> List[str]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="""MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def UpperCAmelCase_ ( self ) -> List[str]:
        pass

    def UpperCAmelCase_ ( self ) -> Tuple:
        # Forward-signature check: first positional argument is pixel_values.
        lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : str = model_class(__UpperCAmelCase )
            lowerCAmelCase__ : Dict = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : Dict = [*signature.parameters.keys()]
            lowerCAmelCase__ : Tuple = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] ,__UpperCAmelCase )

    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # from_pretrained smoke test on the published checkpoint.
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            lowerCAmelCase__ : List[str] = MaskFormerModel.from_pretrained(__UpperCAmelCase )
            self.assertIsNotNone(__UpperCAmelCase )

    def UpperCAmelCase_ ( self ) -> str:
        # Forward with random mask/class labels must produce a loss.
        lowerCAmelCase__ : List[Any] = (self.model_tester.min_size,) * 2
        lowerCAmelCase__ : Any = {
            """pixel_values""": torch.randn((2, 3, *size) ,device=__UpperCAmelCase ),
            """mask_labels""": torch.randn((2, 10, *size) ,device=__UpperCAmelCase ),
            """class_labels""": torch.zeros(2 ,10 ,device=__UpperCAmelCase ).long(),
        }
        lowerCAmelCase__ : Tuple = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(__UpperCAmelCase )
        lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
        self.assertTrue(outputs.loss is not None )

    def UpperCAmelCase_ ( self ) -> str:
        # Hidden-states variant of the base-model test.
        lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(__UpperCAmelCase ,**__UpperCAmelCase ,output_hidden_states=__UpperCAmelCase )

    def UpperCAmelCase_ ( self ) -> Tuple:
        # Attention outputs must be returned when requested.
        lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : int = model_class(__UpperCAmelCase ).to(__UpperCAmelCase )
            lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ,output_attentions=__UpperCAmelCase )
            self.assertTrue(outputs.attentions is not None )

    def UpperCAmelCase_ ( self ) -> int:
        # Training test: loss.backward() on the segmentation head.
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase__ : Dict = self.all_model_classes[1]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.train()
        lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase ).loss
        loss.backward()

    def UpperCAmelCase_ ( self ) -> List[str]:
        # Retain-grad test: gradients must reach every intermediate activation.
        # only MaskFormerForInstanceSegmentation has the loss
        lowerCAmelCase__ : Tuple = self.all_model_classes[1]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        lowerCAmelCase__ : Union[str, Any] = True
        lowerCAmelCase__ : Tuple = True
        lowerCAmelCase__ : Optional[Any] = model_class(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.train()
        lowerCAmelCase__ : Dict = model(__UpperCAmelCase ,mask_labels=__UpperCAmelCase ,class_labels=__UpperCAmelCase )
        lowerCAmelCase__ : Optional[Any] = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        lowerCAmelCase__ : str = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        lowerCAmelCase__ : Union[str, Any] = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        lowerCAmelCase__ : List[Any] = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__UpperCAmelCase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the slow integration tests below when comparing
# model outputs against hard-coded expected slices.
_lowerCAmelCase = 1e-4


def _SCREAMING_SNAKE_CASE ( ):
    """Load the COCO fixture image used by the integration tests.

    NOTE(review): the body binds ``lowerCAmelCase__`` but returns the unbound
    name ``image``, and the integration tests call ``prepare_img()`` -- the
    identifiers look machine-renamed; TODO restore the original names.
    """
    lowerCAmelCase__ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_vision
@slow
class lowerCAmelCase_( unittest.TestCase ):
    """Slow integration tests comparing pretrained MaskFormer checkpoints
    against hard-coded expected logits (tolerance is the module-level ``1e-4``
    constant).

    NOTE(review): machine-renaming applies here too -- all methods share the
    name ``UpperCAmelCase_``, locals are all ``lowerCAmelCase__`` while later
    lines read the intended names (``model``, ``inputs``, ``inputs_shape``,
    ...), ``prepare_img`` and ``self.default_image_processor`` are referenced
    but only exist under mangled names in this module, and ``__UpperCAmelCase``
    is read unbound throughout. TODO restore the original identifiers.
    """

    @cached_property
    def UpperCAmelCase_ ( self ) -> List[Any]:
        # The default image processor (None when vision deps are missing).
        return (
            MaskFormerImageProcessor.from_pretrained("""facebook/maskformer-swin-small-coco""" )
            if is_vision_available()
            else None
        )

    def UpperCAmelCase_ ( self ) -> Any:
        # Headless inference: check encoder / pixel-decoder / transformer-
        # decoder last hidden states against expected 3x3 slices.
        lowerCAmelCase__ : Any = MaskFormerModel.from_pretrained("""facebook/maskformer-swin-small-coco""" ).to(__UpperCAmelCase )
        lowerCAmelCase__ : str = self.default_image_processor
        lowerCAmelCase__ : str = prepare_img()
        lowerCAmelCase__ : Optional[int] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
        lowerCAmelCase__ : Dict = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
        lowerCAmelCase__ : Optional[Any] = torch.tensor(
            [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(__UpperCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
        lowerCAmelCase__ : Dict = torch.tensor(
            [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(__UpperCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
        lowerCAmelCase__ : Optional[int] = torch.tensor(
            [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(__UpperCAmelCase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        # Instance-segmentation head on the swin-small COCO checkpoint:
        # compare mask and class queries logits against expected slices.
        lowerCAmelCase__ : List[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
            .to(__UpperCAmelCase )
            .eval()
        )
        lowerCAmelCase__ : Optional[Any] = self.default_image_processor
        lowerCAmelCase__ : List[str] = prepare_img()
        lowerCAmelCase__ : str = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
        lowerCAmelCase__ : Optional[int] = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
        with torch.no_grad():
            lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase )
        # masks_queries_logits
        lowerCAmelCase__ : Optional[int] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
        lowerCAmelCase__ : Optional[int] = [
            [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
            [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
            [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
        ]
        lowerCAmelCase__ : Optional[int] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
        # class_queries_logits
        lowerCAmelCase__ : Tuple = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(
            [
                [1.65_12E00, -5.25_72E00, -3.35_19E00],
                [3.61_69E-02, -5.90_25E00, -2.93_13E00],
                [1.07_66E-04, -7.76_30E00, -5.12_63E00],
            ] ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )

    def UpperCAmelCase_ ( self ) -> str:
        # Same head test on the resnet101 COCO-stuff checkpoint.
        lowerCAmelCase__ : List[Any] = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-resnet101-coco-stuff""" )
            .to(__UpperCAmelCase )
            .eval()
        )
        lowerCAmelCase__ : Optional[Any] = self.default_image_processor
        lowerCAmelCase__ : int = prepare_img()
        lowerCAmelCase__ : Optional[Any] = image_processor(__UpperCAmelCase ,return_tensors="""pt""" ).to(__UpperCAmelCase )
        lowerCAmelCase__ : str = inputs["""pixel_values"""].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__UpperCAmelCase ,(1, 3, 800, 1088) )
        with torch.no_grad():
            lowerCAmelCase__ : str = model(**__UpperCAmelCase )
        # masks_queries_logits
        lowerCAmelCase__ : Optional[Any] = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape ,(1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) ,)
        lowerCAmelCase__ : int = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
        lowerCAmelCase__ : List[str] = torch.tensor(__UpperCAmelCase ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )
        # class_queries_logits
        lowerCAmelCase__ : Optional[Any] = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape ,(1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
        lowerCAmelCase__ : Tuple = torch.tensor(
            [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__UpperCAmelCase ,atol=__UpperCAmelCase ) )

    def UpperCAmelCase_ ( self ) -> Optional[Any]:
        # Forward pass with segmentation maps and labels must return a loss.
        lowerCAmelCase__ : str = (
            MaskFormerForInstanceSegmentation.from_pretrained("""facebook/maskformer-swin-small-coco""" )
            .to(__UpperCAmelCase )
            .eval()
        )
        lowerCAmelCase__ : Dict = self.default_image_processor
        lowerCAmelCase__ : Union[str, Any] = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,)
        lowerCAmelCase__ : Tuple = inputs["""pixel_values"""].to(__UpperCAmelCase )
        lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""mask_labels"""]]
        lowerCAmelCase__ : Union[str, Any] = [el.to(__UpperCAmelCase ) for el in inputs["""class_labels"""]]
        with torch.no_grad():
            lowerCAmelCase__ : Any = model(**__UpperCAmelCase )
        self.assertTrue(outputs.loss is not None )
| 37 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stub model whose ``forward`` declares the three ONNX inputs contiguously.

    Fixes over the previous revision: the class had been machine-renamed to
    ``lowerCAmelCase_`` (while the ``ensure_valid_input`` test below calls
    ``FuncContiguousArgs()``), and all three parameters shared one mangled
    name -- duplicate argument names are a SyntaxError. The parameter names
    match the keys of the token dict the test passes in, which is what
    ``ensure_valid_input`` inspects via the ``forward`` signature.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        # No real computation -- only the signature matters to the tests.
        return None
class FuncNonContiguousArgs:
    """Stub model whose ``forward`` interleaves an extra parameter
    (``some_other_args``) between the ONNX inputs.

    Fixes over the previous revision: the class had been machine-renamed to
    ``lowerCAmelCase_`` (while the ``ensure_valid_input`` test below calls
    ``FuncNonContiguousArgs()``), and all four parameters shared one mangled
    name -- duplicate argument names are a SyntaxError. The test relies on
    ``some_other_args`` being the second parameter so that only ``input_ids``
    survives the reordering.
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        # No real computation -- only the signature matters to the tests.
        return None
class lowerCAmelCase_( unittest.TestCase ):
    """ONNX graph-export tests: export, quantization, dynamic-axis inference
    and input-ordering helpers from ``transformers.convert_graph_to_onnx``.

    NOTE(review): machine-renaming has broken this class. Methods reference
    ``OnnxExportTestCase.MODEL_TO_TEST`` but the class and attribute are named
    ``lowerCAmelCase_`` / ``__lowercase`` here; helper calls target
    ``self._test_export`` / ``self._test_infer_dynamic_axis`` while all
    methods are named ``UpperCAmelCase_``; several ``def`` lines repeat the
    parameter name ``__UpperCAmelCase`` (duplicate argument names are a
    SyntaxError); loop variables (``model``, ``model_kwargs``) are bound but
    the calls pass the unbound ``__UpperCAmelCase``; and annotated
    tuple-unpacking lines (``a , b : T = ...``) are SyntaxErrors. The stub
    classes used below as ``FuncContiguousArgs``/``FuncNonContiguousArgs``
    also appear earlier in this module only under mangled names -- verify
    they are defined. TODO restore the original identifiers.
    """

    __lowercase : Dict = [
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def UpperCAmelCase_ ( self ) -> int:
        # Export each reference model with the TensorFlow backend, opset 12.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Export each reference model with the PyTorch backend, opset 12.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> Any:
        # Export a freshly built BERT whose vocab is written to a temp file.
        from transformers import BertModel

        lowerCAmelCase__ : Optional[int] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(__UpperCAmelCase ) )
            vocab_file.flush()
            lowerCAmelCase__ : Dict = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCAmelCase__ : Tuple = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) )
            model.save_pretrained(__UpperCAmelCase )
            self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,__UpperCAmelCase )

    @require_tf
    @slow
    def UpperCAmelCase_ ( self ) -> List[str]:
        # TF export then quantization; the quantized file must not be larger.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCAmelCase__ : Dict = self._test_export(__UpperCAmelCase ,"""tf""" ,12 ,**__UpperCAmelCase )
            lowerCAmelCase__ : List[str] = quantize(Path(__UpperCAmelCase ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    @require_torch
    @slow
    def UpperCAmelCase_ ( self ) -> List[Any]:
        # PyTorch export then quantization; quantized file must not be larger.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCAmelCase__ : Any = self._test_export(__UpperCAmelCase ,"""pt""" ,12 ,**__UpperCAmelCase )
            lowerCAmelCase__ : Dict = quantize(__UpperCAmelCase )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Optional[Any]:
        # _test_export helper: run convert() into a temp dir, return the path.
        # NOTE(review): ``with TemporaryDirectory() as tempdir`` binds
        # ``tempdir``, but the path is built from the unbound
        # ``__UpperCAmelCase``; the except clause likewise fails with the
        # unbound name instead of ``e``.
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCAmelCase__ : Optional[int] = Path(__UpperCAmelCase ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
            return path
        except Exception as e:
            self.fail(__UpperCAmelCase )

    @require_torch
    @require_tokenizers
    @slow
    def UpperCAmelCase_ ( self ) -> Union[str, Any]:
        # Dynamic-axis inference for a tiny PyTorch BERT.
        from transformers import BertModel

        lowerCAmelCase__ : List[Any] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCAmelCase__ : Union[str, Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""pt""" )

    @require_tf
    @require_tokenizers
    @slow
    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # Dynamic-axis inference for a tiny TensorFlow BERT.
        from transformers import TFBertModel

        lowerCAmelCase__ : int = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCAmelCase__ : Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,"""tf""" )

    def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> Tuple:
        # _test_infer_dynamic_axis helper: infer_shapes must name every
        # variable and mark batch/sequence axes as dynamic.
        lowerCAmelCase__ : Any = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase )
        lowerCAmelCase__ : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase )
        # Assert all variables are present
        self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase )
        self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] ,{0: """batch""", 1: """sequence"""} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""] ,{0: """batch""", 1: """sequence"""} )
        self.assertDictEqual(shapes["""output_1"""] ,{0: """batch"""} )

    def UpperCAmelCase_ ( self ) -> Optional[int]:
        # ensure_valid_input ordering behaviour with contiguous and
        # non-contiguous forward() signatures.
        lowerCAmelCase__ : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        lowerCAmelCase__ : Union[str, Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__UpperCAmelCase ) ,3 )
        # Should have exactly the same input names
        self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__UpperCAmelCase ,(tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCAmelCase__ , lowerCAmelCase__ : int = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__UpperCAmelCase ) ,1 )
        self.assertEqual(len(__UpperCAmelCase ) ,1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] ,tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] ,"""input_ids""" )

    def UpperCAmelCase_ ( self ) -> Tuple:
        # generate_identified_filename must insert the suffix before ".onnx".
        lowerCAmelCase__ : Dict = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) ,"""-test""" )
        self.assertEqual("""/home/something/my_fake_model-test.onnx""" ,generated.as_posix() )
| 37 | 1 |
def stooge_sort(arr: list) -> list:
    """Sort ``arr`` in place using stooge sort and return it.

    Fixes over the previous revision: both functions were named ``a__`` (the
    second definition shadowed the first), the wrapper returned the unbound
    name ``arr`` while its parameter had a different name, the recursive calls
    targeted an undefined ``stooge``, the helper repeated one parameter name
    three times (a SyntaxError), the swap assigned to a throwaway annotated
    local instead of exchanging the two elements, and the ``__main__`` block
    called an undefined ``stooge_sort``.
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    """Recursively stooge-sort the inclusive slice ``arr[i..h]`` in place."""
    # Empty or single-element range: nothing to do.
    if i >= h:
        return
    # If first element is larger than the last, swap them.
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # For 3+ elements: sort the first 2/3, then the last 2/3, then the first
    # 2/3 again -- the overlapping thirds are what make stooge sort correct.
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge(arr, i, h - t)
        stooge(arr, i + t, h)
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 368 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for ``ShapEImgaImgPipeline`` built from tiny dummy components.

    NOTE(review): the chunk was whitespace-mangled and every class attribute /
    method shared one obfuscated name; names were restored from the
    ``PipelineTesterMixin`` contract and from the attributes the method bodies
    read explicitly (``self.pipeline_class``, ``self.batch_params``,
    ``self.time_input_dim``, ``self.dummy_prior``, ...).
    """

    # Configuration consumed by PipelineTesterMixin.
    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        # Tiny CLIP vision tower; fixed seed keeps weights deterministic.
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the full set of tiny pipeline components."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic call kwargs for the pipeline on ``device``."""
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the real ``openai/shap-e-img2img`` checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        # Compare against the committed reference render.
        assert_mean_pixel_difference(images, expected_image)
| 99 | 0 |
import unittest
from transformers import DonutProcessor
__UpperCAmelCase : Union[str, Any] = "naver-clova-ix/donut-base"
class __snake_case ( unittest.TestCase ):
    """Tests ``DonutProcessor.token2json`` XML-tag-to-dict decoding."""

    def setUp(self):
        # BUG FIX: the processor was bound to a local (mangled) name and built
        # from an undefined reference; bind it to ``self.processor`` so the
        # test methods can use it. The checkpoint name is inlined because the
        # module-level constant's dunder name would be name-mangled inside
        # this class body.
        self.processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }
        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        # BUG FIX: ``tokenajson`` does not exist on DonutProcessor; it is the
        # digit-mangled form of ``token2json``.
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
| 111 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Standard transformers lazy-import scaffolding for the BARTpho tokenizer.
# BUG FIX: the structure dict was bound to a mangled name while _LazyModule
# received the undefined ``_import_structure``, and the lazy module object was
# discarded instead of being installed into ``sys.modules``.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional; without it the tokenizer is simply absent.
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 111 | 1 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _UpperCamelCase(product: str = "laptop") -> DataFrame:
    """Scrape the first amazon.in search-results page for ``product``.

    Returns a DataFrame with title, link, current price, rating, MRP and
    discount columns. Fields that cannot be parsed are left as placeholders
    or NaN. NOTE(review): scraping depends on Amazon's current markup and may
    break without notice; results were never guaranteed stable.
    """
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            # BUG FIX: ``item.ha`` was the digit-mangled form of ``item.h2``.
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        # BUG FIX: the parsed row and the placeholder writes were assigned to
        # mangled throwaway names; restore the ``data_frame.loc`` targets.
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out rows whose price/MRP relationship makes the discount nonsensical.
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "MRP of the product",
    ] = " "
    data_frame.loc[
        data_frame["MRP of the product"] > data_frame["Current Price of the product"],
        "Discount",
    ] = " "
    # Use 1-based row numbering in the emitted CSV.
    data_frame.index += 1
    return data_frame
if __name__ == "__main__":
    # BUG FIX: the product name was bound to a mangled throwaway name and the
    # script then referenced the undefined ``product`` and
    # ``get_amazon_product_data``; use consistent names instead.
    product = "headphones"
    _UpperCamelCase(product).to_csv(f"Amazon Product Data for {product}.csv")
| 342 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_snake_case = logging.get_logger(__name__)
# Map from checkpoint id on the Hub to its config.json URL.
# NOTE(review): this rebinds the same (mangled) name and clobbers the logger
# above — upstream these are two distinct names; confirm before relying on
# either binding.
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _snake_case(_lowercase):
    """Configuration class for RoCBert.

    Stores vocabulary/architecture sizes for the text model plus the extra
    pronunciation- and shape-embedding settings that are specific to RoCBert.

    BUG FIX: every ``__init__`` parameter carried the same mangled name
    ``__lowerCamelCase`` (a SyntaxError) while the body read real parameter
    names; the signature was reconstructed from the defaults, whose order
    matches the upstream RoCBert configuration exactly.
    """

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation / glyph-shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 342 | 1 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    """Builds tiny MaskFormer configs/inputs and runs shape checks.

    BUG FIX: the class carried a mangled name while its only call site
    instantiates ``MaskFormerModelTester``; duplicate mangled method names
    clobbered each other and were restored from the names the test class
    calls (``prepare_config_and_inputs``, ``create_and_check_maskformer_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        """Random pixel values/masks plus binary mask and class labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        """A minimal Swin-backbone + DETR-decoder MaskFormer config."""
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MaskFormer.

    BUG FIX: the mangled bases ``_lowercase`` were undefined; the mixins are
    restored from this file's imports. Duplicate mangled attribute/method
    names were restored from the names the bodies read
    (``self.all_model_classes``, ``self.model_tester``, ...).
    """

    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
lowerCAmelCase__ = 1E-4
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    BUG FIX: the function carried a mangled name while every call site in the
    integration test class calls ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against real MaskFormer checkpoints.

    ``lowerCAmelCase__`` below is the module-level (mangled-name) tolerance
    constant defined above this class.
    """

    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=lowerCAmelCase__
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=lowerCAmelCase__
            )
        )
        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=lowerCAmelCase__
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=lowerCAmelCase__))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=lowerCAmelCase__))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=lowerCAmelCase__))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=lowerCAmelCase__))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        # BUG FIX: ``np.floataa`` was the digit-mangled form of ``np.float32``.
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
| 72 |
"""simple docstring"""
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedy change-making.

    Fix: the obfuscated def had two parameters both named ``A_`` (a
    SyntaxError) and never bound ``total_value``/``answer``; the driver
    below calls it ``find_minimum_change``, restored here.

    Args:
        denominations: available denominations, in ascending order.
        value: amount to change (string or anything ``int()`` accepts).

    Returns:
        The denominations used, largest first.
    """
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denominations, largest first (input is ascending).
    for denomination in reversed(denominations):
        # Take as many of this denomination as still fit.
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append to the "answers" array

    return answer
# Driver Code
if __name__ == "__main__":
    # Interactive driver: either read custom denominations or fall back to
    # the standard Indian currency denominations.
    # Fix: the obfuscated version assigned everything to `lowerCAmelCase__`,
    # leaving `denominations`, `n`, `value` and `answer` unbound.
    denominations = []
    value = '''0'''

    if (
        input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
        == "y"
    ):
        n = int(input('''Enter the number of denominations you want to add: ''').strip())
        for i in range(0, n):
            denominations.append(int(input(F"""Denomination {i}: """).strip()))
        value = input('''Enter the change you want to make in Indian Currency: ''').strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input('''Enter the change you want to make: ''').strip()

    if int(value) <= 0:  # equivalent to the original `== 0 or < 0`
        print('''The total value cannot be zero or negative.''')
    else:
        print(F"""Following is minimal change for {value}: """)
        answer = find_minimum_change(denominations, value)
        # Print result
        for coin in answer:
            print(coin, end=''' ''')
| 72 | 1 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way ``target`` can be built by concatenating words from
    ``word_bank`` (words may be reused).

    Fix: the obfuscated def had two parameters both named ``A__`` (a
    SyntaxError), never stored the seed combination in ``table[0]``, and the
    demo below calls it ``all_construct``; all restored here.

    >>> all_construct("abc", ["a", "b", "c"])
    [['a', 'b', 'c']]
    """
    word_bank = word_bank or []
    # create a table: table[i] holds every combination building target[:i]
    table_size = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])

    # seed value: the empty prefix has exactly one (empty) combination
    table[0] = [[]]

    # iterate through the indices
    for i in range(table_size):
        # only positions that are reachable can be extended
        if table[i] != []:
            for word in word_bank:
                # slice condition: the word must match target starting at i
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to table[i + len(word)]
                    table[i + len(word)] += new_combinations

    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()

    return table[len(target)]
if __name__ == "__main__":
    # Demo: print every construction for a few sample targets/word banks.
    demo_cases = [
        ('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']),
        ('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']),
        (
            '''hexagonosaurus''',
            ['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
        ),
    ]
    for sample_target, sample_bank in demo_cases:
        print(all_construct(sample_target, sample_bank))
| 253 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class UpperCamelCase_(unittest.TestCase):
    """PyTorch <-> TensorFlow cross-loading tests for the TF auto classes.

    Fixes applied:
    - the obfuscated placeholder ``lowerCAmelCase_`` was never bound, so every
      method raised NameError; loop variables are ``model_name``, the
      ``from_pt``/``from_tf``/``output_loading_info`` flags are literal ``True``;
    - all methods previously shared one name (``_SCREAMING_SNAKE_CASE``) and
      shadowed each other — they are restored to distinct ``test_*`` names.
      The isinstance targets are reconstructed from this file's import list
      (e.g. TFBertModel/BertModel, TFRobertaForMaskedLM, ...).
    """

    @slow
    def test_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14_410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14_410)
| 253 | 1 |
'''simple docstring'''
# Package version string.
# NOTE(review): upstream accelerate exposes this as `__version__`; the
# obfuscated name `__A` is kept — confirm nothing imports it by this name.
__A ='0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich | 163 |
'''simple docstring'''
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class _snake_case :
    """Wrapper around a learning-rate scheduler that only steps when the
    wrapped optimizer(s) actually stepped.

    Fixes applied: the obfuscated ``__init__`` had four parameters all named
    ``_lowerCamelCase`` (a SyntaxError), and all six methods shared the name
    ``snake_case__`` so only the last survived. Parameter and method names
    are restored to match the assignment order and the scheduler API being
    proxied (``step``, ``get_last_lr``, ``state_dict``, ...).

    Args:
        scheduler: the LR scheduler to wrap.
        optimizers: a single optimizer or a list/tuple of optimizers to watch.
        step_with_optimizer: whether scheduler steps are tied to optimizer steps.
        split_batches: whether batches are split across processes (one
            scheduler step per training step) rather than multiplied.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, """total_steps"""):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthroughs to the wrapped scheduler.
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase ( ABC ):
    """Abstract base class for CLI subcommands.

    Fixes applied: the original base class name ``_UpperCAmelCase`` was
    undefined — ``ABC`` (imported above) is the intended base; the two
    methods both carried the obfuscated name ``__A`` so the second shadowed
    the first. Subclasses must register their arguments on an
    ``ArgumentParser`` and implement ``run``.
    """

    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        """Attach this command's sub-parser and arguments to ``parser``."""
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        """Execute the command."""
        raise NotImplementedError()
| 198 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for this conversion script.
# NOTE(review): obfuscated name (upstream calls it `logger`); the `str`
# annotation is wrong — `logging.get_logger` returns a Logger instance.
UpperCAmelCase_ : str = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    """Build a MaskFormerConfig for ``model_name``.

    The backbone is a tiny Swin; the label count and id2label mapping file
    are selected from the dataset the checkpoint was trained on.

    Fixes applied: the obfuscated version assigned ``config.num_labels``,
    ``filename`` and ``repo_id`` to a dead local ``A__``, and the dict
    comprehension used ``int(_A)`` (the model name!) instead of ``int(k)``.
    """
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}  # JSON keys arrive as strings

    return config
def create_rename_keys(config):
    """Build the (source_key, dest_key) rename list mapping the original
    MaskFormer checkpoint layout onto the HF model layout.

    Fix: only the def line and the list's name were restored — the body
    already called ``rename_keys.append`` and ``return rename_keys``, so the
    local ``A__ = []`` never created the referenced list.
    """
    rename_keys = []
    # stem
    # fmt: off
    rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias"))

        if i < 3:
            rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias"))

    # FPN
    rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight"))
    rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias"))
    for source_index, target_index in zip(range(3, 0, -1), range(0, 3)):
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight"))
        rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight"))
        rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias"))
    rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight"))
    rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias"))

    # Transformer decoder
    for idx in range(config.decoder_config.decoder_layers):
        # self-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias"))
        # cross-attention out projection
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias"))
        # MLP 1
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias"))
        # MLP 2
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias"))
        # layernorm 1 (self-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias"))
        # layernorm 2 (cross-attention layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias"))
        # layernorm 3 (final layernorm)
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight"))
        rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias"))

    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight"))
    rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias"))

    # heads on top
    rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight"))
    rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight"))
    rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias"))
    for i in range(3):
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight"))
        rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias"))
    # fmt: on

    return rename_keys
def rename_key(dct: dict, old: str, new: str) -> None:
    """Move ``dct[old]`` to ``dct[new]`` in place.

    Fix: the obfuscated def had three parameters all named ``_A`` (a
    SyntaxError) and assigned the popped value to a dead local instead of
    storing it under the new key.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split each fused Swin ``attn.qkv`` tensor into separate q/k/v entries.

    The original checkpoint stores one stacked qkv matrix/bias per block
    (query rows first, then key, then value); the HF model expects separate
    ``attention.self.query/key/value`` parameters, matching the key scheme
    visible in ``create_rename_keys``.

    Fix: the obfuscated version had duplicate ``_A`` parameters and wrote
    every slice to a dead local instead of back into ``state_dict``.
    """
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            prefix = f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self"
            state_dict[f"{prefix}.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"{prefix}.query.bias"] = in_proj_bias[:dim]
            state_dict[f"{prefix}.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"{prefix}.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"{prefix}.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"{prefix}.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    """Split the fused decoder attention ``in_proj`` tensors into q/k/v.

    Both the self-attention and the cross-attention (``multihead_attn``) of
    every transformer decoder layer store a single stacked in_proj weight
    and bias; the HF model expects separate ``q_proj``/``k_proj``/``v_proj``
    parameters (the ``self_attn``/``encoder_attn`` key scheme matches the
    out_proj renames in ``create_rename_keys``).

    Fixes: duplicate ``_A`` parameters removed, slices written back into
    ``state_dict``, and the stray ``config.hidden_size`` lookup replaced by
    the local ``hidden_size`` (the decoder's hidden size).
    """
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    """Download the standard COCO cats verification image as a PIL image.

    Fixes: def name restored (the conversion function calls ``prepare_img``);
    ``stream=_A`` referenced an undefined name (``stream=True`` intended);
    the old ``-> torch.Tensor`` annotation was wrong — a ``PIL.Image`` is
    returned.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Convert an original pickled MaskFormer checkpoint into HF format.

    Fixes: def name and all locals restored from the obfuscated dump so the
    sibling helpers (``get_maskformer_config``, ``create_rename_keys``, ...)
    and the argparse driver resolve.

    Args:
        model_name: one of the supported ``maskformer-swin-*`` names.
        checkpoint_path: path to the original pickled state dict.
        pytorch_dump_folder_path: output directory, or None to skip saving.
        push_to_hub: whether to upload the converted model and processor.
    """
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors (values only; no keys are added here)
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    # only the final encoder layernorm is expected to be newly initialized
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"""Unexpected keys: {unexpected_keys}"""

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"""Saving model and image processor to {pytorch_dump_folder_path}""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"""nielsr/{model_name}""")
        image_processor.push_to_hub(f"""nielsr/{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        # Fix: help was accidentally a one-element tuple `("...",)`
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 198 | 1 |
def longest_common_subsequence(x: str, y: str) -> tuple[int, str]:
    """Return ``(length, subsequence)`` for the longest common subsequence of x and y.

    Classic O(m*n) dynamic programming: fill a table of LCS lengths, then
    backtrack from the bottom-right corner to rebuild one optimal subsequence.
    The name matches the call at the bottom of this file.

    >>> longest_common_subsequence("AGGTAB", "GXTXAYB")
    (4, 'GTAB')
    """
    # Explicit validation instead of `assert` (asserts vanish under `python -O`).
    if x is None or y is None:
        raise ValueError("longest_common_subsequence() arguments must not be None")
    m = len(x)
    n = len(y)
    # dp[i][j] = length of the LCS of x[:i] and y[:j]
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            dp[i][j] = max(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1] + match)
    # Backtrack to reconstruct one optimal subsequence.
    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if dp[i][j] == dp[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif dp[i][j] == dp[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return dp[m][n], seq
if __name__ == "__main__":
    # Demo/self-check: the expected_* values document the known-correct answer
    # for this input pair; the names below match what the print statement uses.
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 7 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    """Build an UperNetConfig (Swin backbone + UperNet head) for `model_name`.

    The model size embedded in the name ("tiny"/"small"/"base"/"large") selects
    the Swin hyper-parameters; ADE20k label maps are fetched from the hub.
    Name restored to match the call inside convert_upernet_checkpoint.
    """
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information (ADE20k: 150 classes)
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    """Return (src, dest) rename pairs translating original mmseg Swin+UperNet
    checkpoint keys to their Hugging Face equivalents.

    `config.backbone_config.depths` drives how many stage/block entries are
    emitted. Name restored to match the call in convert_upernet_checkpoint.
    """
    rename_keys = []
    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        # only the first three stages have a patch-merging (downsample) layer
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under key `old` in `dct` to key `new` (in place).

    Restored: the popped value was previously assigned to a throwaway name and
    never written back, so the rename had no effect.
    """
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    """Split each Swin block's fused qkv projection into separate query/key/value
    entries in `state_dict` (in place).

    The original checkpoint stores attention input projections as a single
    matrix + bias; HF Swin expects them under .attention.self.{query,key,value}.
    Restored: the slices were previously assigned to throwaway names instead of
    the state-dict keys.
    """
    # feature dim doubles at every stage
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    """Reorder the columns of a 2D patch-merging ("reduction") weight so the
    channel groups match HF's unfold order.

    Restored: the unpacked shape was bound to a throwaway name, leaving
    `out_channel`/`in_channel` undefined.
    """
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    # swap groups 1 and 2, then interleave back to the original column count
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def reverse_correct_unfold_reduction_order(x):
    """Inverse of correct_unfold_reduction_order for 2D reduction weights.

    Name restored to match the call in convert_upernet_checkpoint's
    downsample-parameter fixup loop.
    """
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    # undo the group swap applied by the forward ordering
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x
def correct_unfold_norm_order(x):
    """Reorder a 1D patch-merging norm parameter so channel groups match HF's
    unfold order (vector counterpart of correct_unfold_reduction_order).

    Restored: `in_channel` was previously bound to a throwaway name.
    """
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x
def reverse_correct_unfold_norm_order(x):
    """Inverse of correct_unfold_norm_order for 1D norm parameters.

    Name restored to match the call in convert_upernet_checkpoint's
    downsample-parameter fixup loop.
    """
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Convert an mmsegmentation Swin+UperNet checkpoint to the HF format.

    Downloads the original state dict, renames/reshapes its weights to match
    UperNetForSemanticSegmentation, verifies the logits on a sample ADE20k
    image, and optionally saves the model locally and/or pushes it to the hub.
    Restored: every intermediate result was bound to a throwaway name while the
    rest of the body referenced the original identifiers.
    """
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]
    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters (original mmseg unfold order differs from HF)
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values against known-good slices for each checkpoint
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    # CLI entry point: bind the parser/args results to the names actually used
    # below (they were previously assigned to throwaway names).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="upernet-swin-tiny",
        type=str,
        choices=[f"upernet-swin-{size}" for size in ["tiny", "small", "base", "large"]],
        help="Name of the Swin + UperNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 7 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): constant name follows the transformers *_PRETRAINED_CONFIG_ARCHIVE_MAP
# convention; the original bound both the logger and this map to the same throwaway
# name, so the logger was clobbered.
MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
    # See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class lowerCAmelCase__(PretrainedConfig):
    """Configuration for an M-CTC-T speech-recognition model.

    Restored from a mangled original: the base class was an undefined name
    (PretrainedConfig is the only config base imported above), the __init__
    parameters all shared one duplicate name (a SyntaxError), and the
    attribute assignments were bound to throwaway names instead of `self`.
    """

    # PretrainedConfig requires `model_type` for auto-class registration.
    model_type = "mctct"

    def __init__(
        self,
        vocab_size=8065,
        hidden_size=1536,
        num_hidden_layers=36,
        intermediate_size=6144,
        num_attention_heads=4,
        attention_head_dim=384,
        max_position_embeddings=920,
        layer_norm_eps=1e-5,
        layerdrop=0.3,
        hidden_act="relu",
        initializer_range=0.02,
        hidden_dropout_prob=0.3,
        attention_probs_dropout_prob=0.3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        conv_glu_dim=1,
        conv_dropout=0.3,
        num_conv_layers=1,
        conv_kernel=(7,),
        conv_stride=(3,),
        input_feat_per_channel=80,
        input_channels=1,
        conv_channels=None,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # transformer encoder hyper-parameters
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # convolutional feature-extractor hyper-parameters
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel)
        self.conv_stride = list(conv_stride)

        if len(self.conv_kernel) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel) = {len(self.conv_kernel)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )
| 359 |
'''simple docstring'''
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    """Return True if `module` is a torch.compile artifact (a _dynamo OptimizedModule).

    Restored: the `hasattr` probe targeted the argument instead of `torch`
    (the next line accesses `torch._dynamo`, which is what the guard protects).
    Name restored to match the call in extract_model_from_parallel.
    """
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Peel distributed/compiled wrappers off `model` and return the raw module.

    Unwraps torch.compile (OptimizedModule), DDP/DataParallel, and DeepSpeed
    engines; optionally strips the accelerate fp32 forward wrapper. If the
    input was compiled, the compiled wrapper is re-attached around the
    unwrapped module before returning. Restored from mangled bindings
    (parameter name "keep_fpaa_wrapper" was a corruption of keep_fp32_wrapper).
    """
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model, options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            # walk the decorator chain back to the original forward
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, "_converted_to_transformer_engine", False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        # re-wrap the unwrapped module in the original compiled artifact
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    """Block until every distributed process reaches this barrier (delegates to PartialState)."""
    PartialState().wait_for_everyone()
def save(obj, f):
    """Save `obj` to file/path `f` exactly once per node.

    Uses xm.save under TPU (which handles cross-replica coordination), and
    torch.save only on the local main process otherwise.
    """
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    """Temporarily set upper-cased environment variables from `kwargs`.

    Restored: the env assignment was bound to a throwaway name, so nothing was
    patched. Cleanup now runs in a `finally` so variables are removed even if
    the managed block raises.
    NOTE(review): pre-existing values of the same variables are deleted, not
    restored, on exit — confirm callers don't rely on restoration.
    """
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    try:
        yield
    finally:
        for key in kwargs:
            if key.upper() in os.environ:
                del os.environ[key.upper()]
def get_pretty_name(obj):
    """Return a human-readable name for `obj`.

    Prefers __qualname__, then __name__; objects with neither (e.g. instances)
    are replaced by their class first. Falls back to str(obj).
    Restored: the class fallback was bound to a throwaway name, losing the rebind.
    """
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` (in place) and return it.

    Nested dicts are merged key-by-key; non-dict values from `source`
    overwrite those in `destination`. Restored: the recursion target and the
    isinstance check referenced throwaway names.
    """
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port=None):
    """Return True if a TCP connection to localhost:`port` succeeds.

    Defaults to 29500 (the conventional torch.distributed rendezvous port).
    Restored: the default-port rebind was lost to a throwaway name.
    """
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        # connect_ex returns 0 on success instead of raising
        return s.connect_ex(("localhost", port)) == 0
| 322 | 0 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowerCAmelCase(TestCasePlus):
    """Offline-mode tests: run small transformers programs in a subprocess with
    socket access mocked out, and check TRANSFORMERS_OFFLINE=1 behavior.

    Restored: all five methods shared one name (so only the last was
    registered), the base class was an undefined name (TestCasePlus is the
    imported test base), and locals were bound to throwaway names.
    """

    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]
        # should succeed (files are cached, so flaky internet is tolerated)
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = "\nfrom transformers import pipeline\n "
        run = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
        mock = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # expected to FAIL: task inference needs network in offline mode
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = "\nfrom transformers import AutoModel\n "
        run = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 95 |
def solution(length: int = 50) -> int:
    """Project Euler 114: count the ways a row of `length` units can be filled
    with red blocks (minimum length 3, any two separated by at least one black
    unit) and single black units.

    ways_number[r] accumulates the count for a row of length r; each candidate
    (block_length, block_start) places the *last* red block, multiplying by the
    count for the remaining prefix. Restored: the parameter was mangled while
    the body used `length`/`ways_number`, and the name matches the call below.

    >>> solution(7)
    17
    """
    # a row of length 0..2 can only be left empty -> exactly 1 way each
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                # prefix before the block and its mandatory separating black unit
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            # block flush against the start of the row (no separator needed)
            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
    # f-string "=" debug syntax prints both the expression and its value.
    print(F"""{solution() = }""")
| 95 | 1 |
'''simple docstring'''
import re
from filelock import FileLock
try:
    import nltk

    # Restored: the flag was bound to a throwaway name, leaving NLTK_AVAILABLE
    # (used below and in the splitter's assert) undefined.
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # serialize the punkt download across concurrent workers
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def lowercase__(x: str) -> str:
    """Split `x` into sentences with NLTK punkt and join them with newlines.

    Restored: the body referenced an undefined name instead of the parameter,
    and the result of re.sub was discarded (re.sub returns the new string).
    Return annotation fixed: "\\n".join yields a str, not a Dict.
    """
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 359 |
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the canonical sorted-letter form of `word` — its anagram key.

    Name restored to match the calls in `anagram` and the module-level
    index-building loop below.
    """
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    """Return all dictionary words sharing `my_word`'s signature (its anagrams,
    including itself). Relies on the module-level `word_by_signature` index.
    Name restored to match the call in the __main__ block below.
    """
    return word_by_signature[signature(my_word)]
# Load the dictionary next to this script and index every word by its sorted-letter
# signature. Restored: all three results were bound to one throwaway name while the
# code below referenced `data`, `word_list`, and `word_by_signature`.
data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    # Keep only signatures shared by more than one word, then dump the mapping
    # as a Python literal. Restored: the dict was bound to a throwaway name
    # while pformat referenced `all_anagrams`.
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 183 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): constant name follows the transformers *_PRETRAINED_CONFIG_ARCHIVE_MAP
# convention; the original bound both the logger and this map to the same throwaway
# name, so the logger was clobbered.
PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _lowerCAmelCase(PretrainedConfig):
    """Configuration for a PEGASUS encoder-decoder model.

    Restored from a mangled original: the base class was an undefined name
    (PretrainedConfig is the only config base imported above), all __init__
    parameters shared one duplicate name (a SyntaxError), and attribute
    assignments were bound to throwaway names instead of `self`.
    """

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
| 231 |
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for BloomTokenizerFast.

    Restored from a mangled original: all methods shared one name (so only the
    last was registered with unittest), class attributes were all bound to one
    name, and the base class was an undefined name (TokenizerTesterMixin is
    the imported mixin).
    """

    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        # round-trip a couple of sentences through encode/decode with pinned ids
        tokenizer = self.get_rust_tokenizer()
        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]
        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)
        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(INPUT_SENTENCES, decoded_tokens)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)
        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())
        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. This test of the parent class will fail since it relies on the
        # maximum sequence length of the positoonal embeddings.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
| 231 | 1 |
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    """Unit tests for `GenerationConfig`: save/load round-trips, construction from a
    model config, in-place `update()`, unknown-kwarg handling, and keyword overrides
    in `from_pretrained`.

    NOTE(review): the original class shared the name `lowerCamelCase` with the class
    below (the second binding shadowed this one), and all five methods were named
    `A_` (only the last survived, and none were discovered by unittest). Names are
    restored; mangled locals that caused NameErrors are reconstructed.
    """

    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        """Round-trip save_pretrained/from_pretrained, optionally under a custom file name."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            # The save directory must be tmp_dir; `config_name` only picks the file name.
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        """A generation config built from a model config inherits its generation fields."""
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        """`update()` applies known attributes, returns unknown ones, and leaves its input untouched."""
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        """Ad-hoc attributes survive a save/load cycle, but `from_model_config` drops them."""
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        """Constructor kwargs override defaults; `from_pretrained` kwargs override saved values."""
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Hub staging tests: push a GenerationConfig to a user repo and to an org repo,
    both via `push_to_hub` and via `save_pretrained(..., push_to_hub=True)`.

    NOTE(review): the original class re-used the name `lowerCamelCase` (shadowing the
    test class above) and all four methods were named `A_`, so setUpClass/tearDownClass
    never ran and no test was discovered; conventional names are restored.
    """

    @classmethod
    def setUpClass(cls):
        """Persist the staging token so the hub calls below can authenticate."""
        # The original bound TOKEN to a throwaway local, leaving cls._token unset.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        """Best-effort removal of repos the tests may have created."""
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """Push to the user namespace and verify the round-tripped config matches."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            # `transformers_version` depends on the environment, so skip it.
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        """Push to an organization namespace and verify the round-tripped config matches."""
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
| 191 |
import qiskit
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> qiskit.result.counts.Counts:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
SCREAMING_SNAKE_CASE__ : List[Any] = qiskit.QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
SCREAMING_SNAKE_CASE__ : int = qiskit.execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    # Run the demo circuit and report the measurement statistics.
    # (Previously this called a nonexistent `single_qubit_measure` and printed an
    # undefined `counts` while the result was bound to a throwaway name.)
    counts = _a(2, 2)
    print(f"Total count for various states are: {counts}")
| 191 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.