import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
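# These tests follow the standard transformers test layout and are normally run with
# pytest; a typical invocation (the path here is hypothetical) would be:
#   python -m pytest tests/models/open_llama/test_modeling_open_llama.py -q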
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message):
    '''Count occurrences of each uppercase letter in message.'''
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x):
    '''Sort helper: return the first element of a pair.'''
    return x[0]
def get_frequency_order(message):
    '''Return the letters of message ordered from most to least frequent.'''
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message):
    '''Score (0-12) how closely the letter frequencies of message match typical English.'''
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1

    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1

    return match_score
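# As a rough calibration: ordinary English prose tends to score close to the maximum
# of 12, while uniformly random letters score much lower - which is what makes this
# score useful as a fitness function when breaking simple substitution ciphers.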
if __name__ == "__main__":
import doctest
doctest.testmod()
'''Evaluate a fully parenthesized arithmetic expression with two stacks
(Dijkstra's two-stack algorithm).'''

__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation):
    '''Return the value of a fully parenthesized integer expression.'''
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()
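    # Dijkstra's rules, referenced by number below:
    #   RULE 1: push operands onto the operand stack
    #   RULE 2: push operators onto the operator stack
    #   RULE 3: ignore left parentheses
    #   RULE 4: on a right parenthesis, pop one operator and two operands,
    #           apply the operator, and push the result back
    #   RULE 5: when the input is exhausted, the result is on top of the operand stack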
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
"""Tokenization class for model T5."""

import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer

if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput

from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"
class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy, **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids):
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def tokenize(self, text: "TextInput", **kwargs):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)
    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
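# Minimal usage sketch (downloads the pretrained vocabulary; the prompt is illustrative):
#   tok = T5Tokenizer.from_pretrained("t5-small")
#   ids = tok("translate English to German: Hello").input_ids
#   print(tok.convert_ids_to_tokens(ids))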
return (out_vocab_file,) | 135 | 0 |
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers > 3 are of the form 6k +/- 1
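    # (any integer is 6k + i with 0 <= i < 6; 6k, 6k + 2, 6k + 3 and 6k + 4 are all
    # divisible by 2 or 3, so only 6k + 1 and 6k + 5 = 6(k + 1) - 1 can be prime)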
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(f"{solution() = }")
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
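            # each controlled-phase gate rotates by pi / 2**(counter - j), halving
            # the angle as the qubit distance grows - this phase ladder is what
            # defines the quantum Fourier transform circuit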
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")
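    # Schur complement of A in the block matrix [[A, B], [B.T, C]]:
    #     S = C - B.T @ A^{-1} @ B,  with  det([[A, B], [B.T, C]]) = det(A) * det(S)
    # (the determinant identity is exactly what the first test below checks)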
return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        # A has 2 rows while B has 3, so schur_complement must raise
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self) -> None:
        # B has 2 columns while C has 3, so schur_complement must raise
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Clean one table-of-content section by removing duplicates and sorting entries alphabetically."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the source (and optionally target) token lengths of the train/val datasets to their len_file."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
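# Example invocation via the fire CLI (the paths here are hypothetical):
#   python save_len_file.py t5-small ./wmt_en_ro --max_source_length 128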
import argparse

import torch
from torch import nn

from transformers import MBartConfig, MBartForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()

    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
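# Example invocation (the checkpoint and output paths are hypothetical):
#   python convert_mbart_original_checkpoint_to_pytorch.py ./model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25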
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
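# This module backs the `accelerate config` subcommand: `get_config_parser` wires the
# default/update sub-parsers onto the main config parser, and `main` dispatches to
# whichever `func` the chosen sub-parser registered.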
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h
if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    # We keep the constants inside the init function and model loading inside setUp function

    # We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
    # Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        # Models and tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto")
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()
    def test_memory_footprint(self):
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()

        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()

        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_4bit = True

        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto")

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name, quantization_config=bnb_config, load_in_4bit=True,
                device_map="auto", bnb_4bit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()
    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        T5ForConditionalGeneration._keep_in_fp32_modules = modules
    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb

        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Bnb4BitModelClassesTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto")
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto")

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced")

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
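# Note: these classes are gated by @require_bitsandbytes / @require_torch_gpu and
# @slow, so in practice they only run on a CUDA machine with `bitsandbytes` and
# `accelerate` installed and with RUN_SLOW=1 set in the environment.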
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
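# For example, 8051 = 83 * 97, and pollard_rho(8051) will typically return one of
# those two factors within the default three attempts.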
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F'''{args.num} is probably prime''')
else:
        quotient = args.num // divisor
print(F'''{args.num} = {divisor} * {quotient}''')
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count perimeters up to `limit` that belong to exactly one
    integer-sided right triangle."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
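    # Euclid's formula: for coprime m > n of opposite parity, (m**2 - n**2, 2*m*n,
    # m**2 + n**2) is a primitive Pythagorean triple with perimeter 2*m*(m + n);
    # every multiple of a primitive perimeter is also counted by the inner loop.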
while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'''{solution() = }''')
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10


@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
    def test_tokenizer_integration(self):
# fmt: off
snake_case = {'''input_ids''': [[3_7_9_1, 7_9_7, 3_1, 1_1, 6_4, 7_9_7, 3_1, 2_4_2_9, 4_3_3, 1_2, 1_1_7_6, 1_2, 2_0, 7_8_6, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 3_2_3_8, 7_9_7, 3_1, 1_1, 3_5, 9_3, 9_1_5, 1_4_2, 2_4_1_3, 2_4_0, 3_7, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_7, 6_1_0, 4_0, 6_2, 4_5_5, 6_5_7, 1_0_4_2, 1_2_3, 7_8_0, 1_7_7, 3_7, 3_0_9, 2_4_1, 1_2_9_8, 5_1_4, 2_0, 2_9_2, 2_7_3_7, 1_1_4, 2_4_6_9, 2_4_1, 8_5, 6_4, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 4, 5_0_9, 4_0_6, 4_2_3, 3_7, 6_0_1, 4, 7_7_7, 3_0_2, 5_4_8, 5_2_8, 4_2_3, 2_8_4, 4, 3_3_8_8, 5_1_1, 4_5_9, 4, 3_5_5_5, 4_0, 3_2_1, 3_0_2, 7_0_5, 4, 3_3_8_8, 5_1_1, 5_8_3, 3_2_6, 5, 5, 5, 6_2, 3_3_1_0, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 3_2, 3_1, 8_5_3, 4_1_8, 6_4, 5_8_3, 5_1_1, 1_6_0_5, 6_2, 3_5, 9_3, 5_6_0, 1_7_7, 2_6_8_0, 2_1_7, 1_5_0_8, 1_5_2_1, 6_4, 5_8_3, 5_1_1, 5_1_9, 6_2, 2_0, 1_5_1_5, 7_6_4, 2_0, 1_4_9, 2_6_1, 5_6_2_5, 7_9_7_2, 2_0, 5_5_4_0, 5_6_7, 1_2_7_6, 9_3, 3_9_2_5, 1_6_7_5, 1_1, 1_5, 8_0_2, 7_9_7_2, 5_7_6, 2_1_7, 1_5_0_8, 1_1, 3_5, 9_3, 1_2_5_3, 2_4_4_1, 1_5, 2_8_9, 6_5_2, 3_1, 4_1_6, 3_2_1, 3_8_4_2, 1_1_5, 4_0, 9_1_1, 8, 4_7_6, 6_1_9, 4, 3_8_0, 1_4_2, 4_2_3, 3_3_5, 2_4_0, 3_5, 9_3, 2_6_4, 8, 1_1, 3_3_5, 5_6_9, 4_2_0, 1_6_3, 5, 2], [2_6_0, 5_4_8, 5_2_8, 4_2_3, 2_0, 4_5_1, 2_0, 2_6_8_1, 1_1_5_3, 3_4_3_4, 2_0, 5_5_4_0, 3_7, 5_6_7, 1_2_6, 1_2_5_3, 2_4_4_1, 3_3_7_6, 4_4_9, 2_1_0, 4_3_1, 1_5_6_3, 1_7_7, 7_6_7, 5_5_4_0, 1_1, 1_2_0_3, 4_7_2, 1_1, 2_9_5_3, 6_8_5, 2_8_5, 3_6_4, 7_0_6, 1_1_5_3, 2_0, 6_7_9_9, 2_0, 2_8_6_9, 2_0, 4_4_6_4, 1_2_6, 4_0, 2_4_2_9, 2_0, 1_0_4_0, 8_6_6, 2_6_6_4, 4_1_8, 2_0, 3_1_8, 2_0, 1_7_2_6, 1_8_6, 2_0, 2_6_5, 5_2_2, 3_5, 9_3, 2_1_9_1, 4_6_3_4, 2_0, 1_0_4_0, 1_2, 6_7_9_9, 1_5, 2_2_8, 2_3_5_6, 1_4_2, 3_1, 1_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_7_5, 2_6_6_6, 6_8_4, 1_5_8_2, 1_1_7_6, 1_2, 6_2_7, 1_4_9, 6_1_9, 2_0, 4_9_0_2, 5_6_3, 1_1, 2_0, 1_4_9, 2_6_1, 3_4_2_0, 2_3_5_6, 1_7_4, 1_4_2, 4_7_1_4, 1_3_1, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=snake_case, model_name="facebook/s2t-small-mustc-en-de-st", revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilingualTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def test_lang_code_to_id(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
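# Illustrative sketch (not part of the test suite): how the language-code
# prefix interacts with encoding for this multilingual checkpoint. Running it
# downloads the model, so it is left as a plain helper rather than a test.
def _example_lang_code_roundtrip() -> bool:
    tok = SpeechaTextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
    tok.tgt_lang = "fr"
    ids = tok("C'est trop cool").input_ids
    # the first id is the French language code, the last id is EOS
    return ids[0] == FR_CODE and ids[-1] == tok.eos_token_id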
| 353 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Any = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig):
    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(backbone_config=backbone_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
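# Minimal usage sketch (illustrative only, not part of the library file):
# instantiate a smaller config and round-trip it through ``to_dict``. The
# ``attribute_map`` above makes ``hidden_size`` an alias of ``d_model``.
def _example_detr_config() -> dict:
    config = DetrConfig(num_queries=50, decoder_layers=3)
    assert config.hidden_size == config.d_model  # alias via attribute_map
    return config.to_dict()  # serializes backbone_config recursively if set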
| 213 | 0 |
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
a : Any = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
a : Dict = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
a : Dict = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6), [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We don't actually care about the result; we just want to make sure
        # the pipeline runs, meaning the float16 tensor was cast back to
        # float32 for postprocessing.
        self.assertIsInstance(response, list)
@slow
@require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)
@slow
@require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs), [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs), [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs), [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
@require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more results
        # than there are unique targets.
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
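# Minimal usage sketch of the pipeline under test (illustrative; the tiny
# checkpoint is the same one the fast tests above use and is downloaded on
# first call):
def _example_fill_mask():
    unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
    # each result dict carries "sequence", "score", "token" and "token_str"
    return unmasker("My name is <mask>")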
| 40 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
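# Minimal wiring sketch (illustrative; the LightningModule to train is defined
# elsewhere in the training script, so this only builds the Trainer):
def build_trainer(output_dir: str, metric: str = "em", patience: int = 2) -> pl.Trainer:
    return pl.Trainer(
        callbacks=[
            Seq2SeqLoggingCallback(),
            get_checkpoint_callback(output_dir, metric),
            get_early_stopping_callback(metric, patience),
        ]
    )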
| 63 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
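# Hedged end-to-end sketch of what the integration test above exercises
# (illustrative; needs network access for the checkpoint and a local image.
# ImageNet class 281 is "tabby cat", the label the test asserts):
def _example_focalnet_inference(image_path: str) -> int:
    processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
    model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
    inputs = processor(images=Image.open(image_path), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000)
    return logits.argmax(dim=-1).item()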
| 364 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)
    # not relevant for this model
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    # decoder cannot keep gradients
    def test_retain_grad_hidden_states_attentions(self):
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
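# Conceptual sketch of the past-key-values equivalence checked above, outside
# the test harness (illustrative; mirrors the tiny tester config and is not
# part of the test suite):
def _example_kv_cache_equivalence() -> bool:
    config = TrOCRConfig(vocab_size=99, d_model=16, decoder_layers=4, decoder_attention_heads=4, decoder_ffn_dim=32)
    model = TrOCRDecoder(config=config).eval()
    ids = torch.randint(1, 99, (1, 7))
    full = model(ids)["last_hidden_state"]
    first = model(ids[:, :-1], use_cache=True)
    step = model(ids[:, -1:], past_key_values=first["past_key_values"])["last_hidden_state"]
    # the cached single-step output should match the full pass at the last position
    return torch.allclose(full[:, -1], step[:, 0], atol=1e-3)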
| 97 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
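# For context, a minimal hand-rolled sketch of the lazy-import pattern used
# above (illustrative only; transformers' _LazyModule also handles __dir__,
# submodule access, and error reporting):
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # import the defining submodule only when the attribute is first used
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)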
| 181 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = config_and_inputs
UpperCAmelCase__ : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
lowerCAmelCase__ = (
{'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = TFCvtModelTester(self )
UpperCAmelCase__ : Tuple = TFCvtConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def lowercase_ ( self : Any ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def lowercase_ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = tf.keras.mixed_precision.Policy('''mixed_float16''' )
tf.keras.mixed_precision.set_global_policy(_A )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''' )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : List[Any] = [*signature.parameters.keys()]
UpperCAmelCase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
def check_hidden_states_output(_A : Dict , _A : Optional[Any] , _A : Dict ):
UpperCAmelCase__ : str = model_class(_A )
UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase__ : Tuple = outputs.hidden_states
UpperCAmelCase__ : int = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Tuple = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase__ : List[str] = True
check_hidden_states_output(_A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[int] = TFCvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> Any:
UpperCAmelCase__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Dict = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCAmelCase__ : Union[str, Any] = self.default_image_processor
UpperCAmelCase__ : Optional[Any] = prepare_img()
UpperCAmelCase__ : Tuple = image_processor(images=_A , return_tensors='''tf''' )
# forward pass
UpperCAmelCase__ : Optional[Any] = model(**_A )
# verify the logits
UpperCAmelCase__ : Union[str, Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
UpperCAmelCase__ : Union[str, Any] = tf.constant([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _A , atol=1e-4 ) )
| 181 | 1 |
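For reference, the shape assertions in the CvT tester above boil down to the usual convolution output-size formula, `floor((size + 2*pad - kernel)/stride + 1)`, applied once per stage. A minimal stand-alone sketch, with defaults copied from the tester's `__init__` (the helper name is illustrative):

```python
from math import floor

def cvt_stage_sizes(image_size=64, patch_sizes=(7, 3, 3), patch_stride=(4, 2, 2), patch_padding=(2, 1, 1)):
    # apply the conv output-size formula once per CvT stage
    height = width = image_size
    for k, s, p in zip(patch_sizes, patch_stride, patch_padding):
        height = floor((height + 2 * p - k) / s + 1)
        width = floor((width + 2 * p - k) / s + 1)
    return height, width

print(cvt_stage_sizes())  # (4, 4): 64 -> 16 -> 8 -> 4 with the defaults above
```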
from ..utils import DummyObject, requires_backends
class A ( metaclass=__UpperCAmelCase ):
__snake_case = ['speech']
def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
class A ( metaclass=__UpperCAmelCase ):
__snake_case = ['speech']
def __init__( self, *UpperCamelCase__, **UpperCamelCase__ ):
"""simple docstring"""
requires_backends(self, ['''speech'''] )
| 167 |
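The two dummy classes above follow transformers' placeholder pattern: the names stay importable even when the `speech` backend is missing and only fail at use time. A minimal sketch of the same idea, with illustrative names and a simplified error message:

```python
class RequiresBackendMeta(type):
    def __getattr__(cls, name):
        # triggered for class-level access such as DummySpeechModel.from_pretrained
        raise ImportError(f"{cls.__name__} requires the {cls._backends} backend(s).")

class DummySpeechModel(metaclass=RequiresBackendMeta):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        # triggered when instantiating the placeholder
        raise ImportError(f"{type(self).__name__} requires the {self._backends} backend(s).")
```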
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A ( unittest.TestCase ):
@property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('''DownBlock2D''', '''AttnDownBlock2D'''), up_block_types=('''AttnUpBlock2D''', '''UpBlock2D'''), )
return model
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.dummy_uncond_unet
lowerCAmelCase_ = KarrasVeScheduler()
lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''' ).images
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=2, generator=UpperCamelCase__, output_type='''numpy''', return_dict=UpperCamelCase__ )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = '''google/ncsnpp-celebahq-256'''
lowerCAmelCase_ = UNetaDModel.from_pretrained(UpperCamelCase__ )
lowerCAmelCase_ = KarrasVeScheduler()
lowerCAmelCase_ = KarrasVePipeline(unet=UpperCamelCase__, scheduler=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCAmelCase_ = torch.manual_seed(0 )
lowerCAmelCase_ = pipe(num_inference_steps=20, generator=UpperCamelCase__, output_type='''numpy''' ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase_ = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 167 | 1 |
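A condensed usage sketch of the pipeline exercised by the slow test above; the checkpoint id and every call appear in the test body, so this should work wherever `diffusers` and the checkpoint download do:

```python
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
generator = torch.manual_seed(0)
image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images[0]
print(image.shape)  # (256, 256, 3), per the test's shape assertion
```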
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def _A ( A__ ):
"""simple docstring"""
__lowercase = R'''\w+[.]\d+'''
__lowercase = re.findall(A__ , A__ )
for pat in pats:
__lowercase = key.replace(A__ , '''_'''.join(pat.split('''.''' ) ) )
return key
def _A ( A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = pt_tuple_key[:-1] + ('''scale''',)
if (
any('''norm''' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
__lowercase = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
__lowercase = pt_tuple_key[:-1] + ('''scale''',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
__lowercase = pt_tuple_key[:-1] + ('''embedding''',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
__lowercase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
__lowercase = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__lowercase = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight":
__lowercase = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__lowercase = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__lowercase = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _A ( A__ , A__ , A__=42 ):
"""simple docstring"""
__lowercase = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
__lowercase = flax_model.init_weights(PRNGKey(A__ ) )
__lowercase = flatten_dict(A__ )
__lowercase = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__lowercase = rename_key(A__ )
__lowercase = tuple(renamed_pt_key.split('''.''' ) )
# Correctly rename weight parameters
__lowercase , __lowercase = rename_key_and_reshape_tensor(A__ , A__ , A__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
# also add unexpected weight so that warning is thrown
__lowercase = jnp.asarray(A__ )
return unflatten_dict(A__ )
| 104 |
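The core of the rename-and-reshape function above is two layout rules; here is a toy NumPy illustration of them (no framework needed):

```python
import numpy as np

# linear layer: PyTorch "weight" (out, in) -> Flax "kernel" (in, out)
pt_linear = np.zeros((8, 4))
flax_kernel = pt_linear.T
assert flax_kernel.shape == (4, 8)

# conv layer: PyTorch (out_ch, in_ch, kh, kw) -> Flax (kh, kw, in_ch, out_ch)
pt_conv = np.zeros((16, 3, 7, 7))
flax_conv = pt_conv.transpose(2, 3, 1, 0)
assert flax_conv.shape == (7, 7, 3, 16)
```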
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
lowerCAmelCase__ = CLIPImageProcessor()
lowerCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
lowerCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 104 | 1 |
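Assuming the conversion script above has been run with `--dump_path ./unclip-image-variation` (the path is illustrative), the resulting pipeline can be reloaded like any other diffusers pipeline:

```python
from diffusers import UnCLIPImageVariationPipeline

pipe = UnCLIPImageVariationPipeline.from_pretrained("./unclip-image-variation")
```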
def lowerCAmelCase_ ( _snake_case : float , _snake_case : float ) -> float:
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"{price_plus_tax(100, 0.25) = }")
print(F"{price_plus_tax(1_25.50, 0.05) = }")
| 41 |
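A quick worked check of the formula above, `price * (1 + tax_rate)`:

```python
print(100 * (1 + 0.25))     # 125.0
print(125.50 * (1 + 0.05))  # ~131.775 (subject to float rounding)
```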
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case : Optional[Any] = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class _snake_case ( snake_case ):
UpperCamelCase__ = 'transfo-xl'
UpperCamelCase__ = ['mems']
UpperCamelCase__ = {
'n_token': 'vocab_size',
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _a=267_735 , _a=[20_000, 40_000, 200_000] , _a=1_024 , _a=1_024 , _a=16 , _a=64 , _a=4_096 , _a=4 , _a=False , _a=18 , _a=1_600 , _a=1_000 , _a=True , _a=True , _a=0 , _a=-1 , _a=True , _a=0.1 , _a=0.0 , _a=True , _a="normal" , _a=0.01 , _a=0.01 , _a=0.02 , _a=1e-5 , _a=0 , **_a , ):
__magic_name__ : List[Any] = vocab_size
__magic_name__ : Dict = []
self.cutoffs.extend(_a )
if proj_share_all_but_first:
__magic_name__ : List[str] = [False] + [True] * len(self.cutoffs )
else:
__magic_name__ : Optional[Any] = [False] + [False] * len(self.cutoffs )
__magic_name__ : Optional[int] = d_model
__magic_name__ : str = d_embed
__magic_name__ : Optional[Any] = d_head
__magic_name__ : Optional[int] = d_inner
__magic_name__ : List[str] = div_val
__magic_name__ : List[str] = pre_lnorm
__magic_name__ : Union[str, Any] = n_layer
__magic_name__ : Optional[int] = n_head
__magic_name__ : str = mem_len
__magic_name__ : int = same_length
__magic_name__ : Dict = attn_type
__magic_name__ : int = clamp_len
__magic_name__ : Optional[int] = sample_softmax
__magic_name__ : List[Any] = adaptive
__magic_name__ : Optional[int] = dropout
__magic_name__ : Optional[int] = dropatt
__magic_name__ : Optional[Any] = untie_r
__magic_name__ : List[str] = init
__magic_name__ : Any = init_range
__magic_name__ : Optional[int] = proj_init_std
__magic_name__ : List[Any] = init_std
__magic_name__ : List[Any] = layer_norm_epsilon
super().__init__(eos_token_id=_a , **_a )
@property
def SCREAMING_SNAKE_CASE ( self ):
# Message copied from Transformer-XL documentation
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def SCREAMING_SNAKE_CASE ( self , _a ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 41 | 1 |
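The `proj_share_all_but_first` branch in the `__init__` above builds one boolean per adaptive-softmax cluster (the head plus one cluster per cutoff), sharing the projection everywhere except the head. A small stand-alone sketch with the config's default cutoffs:

```python
cutoffs = [20_000, 40_000, 200_000]
proj_share_all_but_first = True

if proj_share_all_but_first:
    tie_projs = [False] + [True] * len(cutoffs)
else:
    tie_projs = [False] + [False] * len(cutoffs)

print(tie_projs)  # [False, True, True, True]: head cluster unshared, the rest shared
```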
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
_lowercase: Union[str, Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class _lowercase :
"""simple docstring"""
def __init__(self , lowerCamelCase_ , lowerCamelCase_=16 , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=14 , lowerCamelCase_=10 , lowerCamelCase_=19 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=True , lowerCamelCase_=16 , lowerCamelCase_=2 , lowerCamelCase_=4 , lowerCamelCase_=4 , lowerCamelCase_="gelu" , lowerCamelCase_=0.1 , lowerCamelCase_=0.1 , lowerCamelCase_=[1, 2, 3, 4, 5] , lowerCamelCase_=25 , lowerCamelCase_=5 , ):
"""simple docstring"""
a = d_model
a = parent
a = batch_size
a = prediction_length
a = context_length
a = cardinality
a = num_time_features
a = lags_sequence
a = embedding_dimension
a = is_training
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = intermediate_size
a = hidden_act
a = hidden_dropout_prob
a = attention_probs_dropout_prob
a = context_length
a = prediction_length + label_length
a = label_length
a = moving_average
a = autocorrelation_factor
def UpperCamelCase_ (self ):
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = config.context_length + max(config.lags_sequence )
a = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
a = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
a = floats_tensor([self.batch_size, _past_length] )
a = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
a = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
a = floats_tensor([self.batch_size, config.prediction_length] )
a = {
"past_values": past_values,
"static_categorical_features": static_categorical_features,
"past_time_features": past_time_features,
"past_observed_mask": past_observed_mask,
"future_time_features": future_time_features,
"future_values": future_values,
}
return inputs_dict
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.get_config()
a = self.prepare_autoformer_inputs_dict(lowerCamelCase_ )
return config, inputs_dict
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = AutoformerModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).eval()
a = model(**lowerCamelCase_ )
a = outputs.encoder_last_hidden_state
a = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
a = model.get_encoder()
encoder.save_pretrained(lowerCamelCase_ )
a = AutoformerEncoder.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ )
a , a , a , a , a = model.create_network_inputs(**lowerCamelCase_ )
a , a = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
a = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
a = encoder(inputs_embeds=lowerCamelCase_ )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
a = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
a = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
a = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
a = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
a = model.get_decoder()
decoder.save_pretrained(lowerCamelCase_ )
a = AutoformerDecoder.from_pretrained(lowerCamelCase_ ).to(lowerCamelCase_ )
a = decoder(
trend=lowerCamelCase_ , inputs_embeds=lowerCamelCase_ , encoder_hidden_states=lowerCamelCase_ , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class _lowercase ( lowerCAmelCase, lowerCAmelCase, unittest.TestCase ):
"""simple docstring"""
__A = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
__A = (AutoformerForPrediction,) if is_torch_available() else ()
__A = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
__A = False
__A = False
__A = False
__A = False
__A = False
__A = False
def UpperCamelCase_ (self ):
"""simple docstring"""
a = AutoformerModelTester(self )
a = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
a = model_class(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ )
a , a = model_class.from_pretrained(lowerCamelCase_ , output_loading_info=lowerCamelCase_ )
self.assertEqual(info["missing_keys"] , [] )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*lowerCamelCase_ )
@unittest.skip(reason="Model has no tokens embeddings" )
def UpperCamelCase_ (self ):
"""simple docstring"""
pass
def UpperCamelCase_ (self ):
"""simple docstring"""
a = inspect.signature(getattr(lowerCamelCase_ , "forward" ) )
# The main input is the name of the argument after `self`
a = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a = model_class(lowerCamelCase_ )
a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a = [*signature.parameters.keys()]
a = [
"past_values",
"past_time_features",
"past_observed_mask",
"static_categorical_features",
"static_real_features",
"future_values",
"future_time_features",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("future_observed_mask" )
expected_arg_names.extend(
[
"decoder_attention_mask",
"head_mask",
"decoder_head_mask",
"cross_attn_head_mask",
"encoder_outputs",
"past_key_values",
"output_hidden_states",
"output_attentions",
"use_cache",
"return_dict",
] )
self.assertListEqual(arg_names[: len(lowerCamelCase_ )] , lowerCamelCase_ )
def UpperCamelCase_ (self ):
"""simple docstring"""
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = True
a = getattr(self.model_tester , "seq_length" , lowerCamelCase_ )
a = getattr(self.model_tester , "decoder_seq_length" , lowerCamelCase_ )
a = getattr(self.model_tester , "encoder_seq_length" , lowerCamelCase_ )
a = getattr(self.model_tester , "d_model" , lowerCamelCase_ )
a = getattr(self.model_tester , "num_attention_heads" , lowerCamelCase_ )
a = d_model // num_attention_heads
for model_class in self.all_model_classes:
a = True
a = False
a = True
a = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a = True
a = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
a = outputs.encoder_attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
a = len(lowerCamelCase_ )
a = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
# decoder attentions
a = outputs.decoder_attentions
self.assertIsInstance(lowerCamelCase_ , (list, tuple) )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
a = outputs.cross_attentions
self.assertIsInstance(lowerCamelCase_ , (list, tuple) )
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
a = True
a = True
a = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
with torch.no_grad():
a = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
self.assertEqual(out_len + 2 , len(lowerCamelCase_ ) )
a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(lowerCamelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def UpperCamelCase_ (self ):
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def a( A : int="train-batch.pt" ) -> Dict:
"""simple docstring"""
a = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=A , repo_type="dataset" )
a = torch.load(A , map_location=A )
return batch
@require_torch
@slow
class _lowercase ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase_ (self ):
"""simple docstring"""
a = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase_ )
a = prepare_batch()
with torch.no_grad():
a = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
a = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , lowerCamelCase_ )
a = torch.tensor(
[[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=lowerCamelCase_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase_ )
a = prepare_batch("val-batch.pt" )
with torch.no_grad():
a = model(
past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
a = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , lowerCamelCase_ )
a = torch.tensor(
[[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=lowerCamelCase_ )
self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )
def UpperCamelCase_ (self ):
"""simple docstring"""
a = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCamelCase_ )
a = prepare_batch("val-batch.pt" )
with torch.no_grad():
a = model.generate(
static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
a = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , lowerCamelCase_ )
a = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=lowerCamelCase_ )
a = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCamelCase_ , rtol=1E-1 ) )
| 227 |
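A condensed usage sketch of the prediction path the slow tests above exercise; the checkpoint id, dataset repo, and `generate` arguments are copied from the test bodies:

```python
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoformerForPrediction

file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename="val-batch.pt", repo_type="dataset")
batch = torch.load(file, map_location="cpu")

model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly")
with torch.no_grad():
    outputs = model.generate(
        static_categorical_features=batch["static_categorical_features"],
        past_time_features=batch["past_time_features"],
        past_values=batch["past_values"],
        future_time_features=batch["future_time_features"],
        past_observed_mask=batch["past_observed_mask"],
    )
mean_prediction = outputs.sequences.mean(dim=1)  # average over the parallel samples
```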
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
_lowercase: Any = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , **lowerCamelCase_ ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
self.check_model_type(lowerCamelCase_ )
def UpperCamelCase_ (self , **lowerCamelCase_ ):
"""simple docstring"""
a = {}
a = {}
a = {}
# preprocess args
if "points_per_batch" in kwargs:
a = kwargs["points_per_batch"]
if "points_per_crop" in kwargs:
a = kwargs["points_per_crop"]
if "crops_n_layers" in kwargs:
a = kwargs["crops_n_layers"]
if "crop_overlap_ratio" in kwargs:
a = kwargs["crop_overlap_ratio"]
if "crop_n_points_downscale_factor" in kwargs:
a = kwargs["crop_n_points_downscale_factor"]
# postprocess args
if "pred_iou_thresh" in kwargs:
a = kwargs["pred_iou_thresh"]
if "stability_score_offset" in kwargs:
a = kwargs["stability_score_offset"]
if "mask_threshold" in kwargs:
a = kwargs["mask_threshold"]
if "stability_score_thresh" in kwargs:
a = kwargs["stability_score_thresh"]
if "crops_nms_thresh" in kwargs:
a = kwargs["crops_nms_thresh"]
if "output_rle_mask" in kwargs:
a = kwargs["output_rle_mask"]
if "output_bboxes_mask" in kwargs:
a = kwargs["output_bboxes_mask"]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__(self , lowerCamelCase_ , *lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ):
"""simple docstring"""
return super().__call__(lowerCamelCase_ , *lowerCamelCase_ , num_workers=lowerCamelCase_ , batch_size=lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=64 , lowerCamelCase_ = 0 , lowerCamelCase_ = 512 / 1500 , lowerCamelCase_ = 32 , lowerCamelCase_ = 1 , ):
"""simple docstring"""
a = load_image(lowerCamelCase_ )
a = self.image_processor.size["longest_edge"]
a , a , a , a = self.image_processor.generate_crop_boxes(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
a = self.image_processor(images=lowerCamelCase_ , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
a = self.get_inference_context()
with inference_context():
a = self._ensure_tensor_on_device(lowerCamelCase_ , device=self.device )
a = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
a = image_embeddings
a = grid_points.shape[1]
a = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , lowerCamelCase_ , lowerCamelCase_ ):
a = grid_points[:, i : i + points_per_batch, :, :]
a = input_labels[:, i : i + points_per_batch]
a = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=0.88 , lowerCamelCase_=0.95 , lowerCamelCase_=0 , lowerCamelCase_=1 , ):
"""simple docstring"""
a = model_inputs.pop("input_boxes" )
a = model_inputs.pop("is_last" )
a = model_inputs.pop("original_sizes" ).tolist()
a = model_inputs.pop("reshaped_input_sizes" ).tolist()
a = self.model(**lowerCamelCase_ )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
a = model_outputs["pred_masks"]
a = self.image_processor.post_process_masks(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , binarize=lowerCamelCase_ )
a = model_outputs["iou_scores"]
a , a , a = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=0.7 , ):
"""simple docstring"""
a = []
a = []
a = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
a = torch.cat(lowerCamelCase_ )
a = torch.cat(lowerCamelCase_ )
a , a , a , a = self.image_processor.post_process_for_mask_generation(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
a = defaultdict(lowerCamelCase_ )
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCamelCase_ )
a = {}
if output_rle_mask:
a = rle_mask
if output_bboxes_mask:
a = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 227 | 1 |
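A hedged usage sketch of the chunked pipeline above: the task string matches the mask-generation mapping it checks against, and the output keys (`masks`, `scores`) come from its postprocess return; the checkpoint id and image URL are illustrative assumptions:

```python
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base")  # checkpoint id assumed
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # any path or URL accepted by load_image
    points_per_batch=64,
)
print(len(outputs["masks"]), outputs["scores"].shape)
```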
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def lowercase ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return (-y * np.log(__lowerCamelCase ) - (1 - y) * np.log(1 - h )).mean()
def lowercase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = np.dot(__lowerCamelCase , __lowerCamelCase )
return np.sum(y * scores - np.log(1 + np.exp(__lowerCamelCase ) ) )
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any]=7_0000 ):
'''simple docstring'''
_UpperCAmelCase = np.zeros(x.shape[1] )
for iterations in range(__lowerCamelCase ):
_UpperCAmelCase = np.dot(__lowerCamelCase , __lowerCamelCase )
_UpperCAmelCase = sigmoid_function(__lowerCamelCase )
_UpperCAmelCase = np.dot(x.T , h - y ) / y.size
_UpperCAmelCase = theta - alpha * gradient # updating the weights
_UpperCAmelCase = np.dot(__lowerCamelCase , __lowerCamelCase )
_UpperCAmelCase = sigmoid_function(__lowerCamelCase )
_UpperCAmelCase = cost_function(__lowerCamelCase , __lowerCamelCase )
if iterations % 100 == 0:
print(f'loss: {j} \t' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
__A : Any = datasets.load_iris()
__A : List[Any] = iris.data[:, :2]
__A : Tuple = (iris.target != 0) * 1
__A : Optional[int] = 0.1
__A : Optional[Any] = logistic_reg(alpha, x, y, max_iterations=70000)
print("theta: ", theta) # printing the theta i.e our weights vector
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return sigmoid_function(
np.dot(__lowerCamelCase , __lowerCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
((__A) , (__A)) : Optional[int] = (x[:, 0].min(), x[:, 0].max())
((__A) , (__A)) : List[str] = (x[:, 1].min(), x[:, 1].max())
((__A) , (__A)) : str = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
__A : List[Any] = np.c_[xxa.ravel(), xxa.ravel()]
__A : Any = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 360 |
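For clarity, a self-contained check of the update rule inside `logistic_reg` above, one iteration of `theta <- theta - alpha * X^T (sigmoid(X theta) - y) / n` on toy data:

```python
import numpy as np

X = np.array([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]])
y = np.array([0.0, 0.0, 1.0])
theta = np.zeros(X.shape[1])
alpha = 0.1

h = 1 / (1 + np.exp(-(X @ theta)))               # sigmoid of the scores; all 0.5 at theta = 0
theta = theta - alpha * (X.T @ (h - y)) / y.size  # one gradient step
print(theta)                                      # [0.0, 0.01666...]
```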
"""simple docstring"""
class _a :
"""simple docstring"""
def __init__( self : Tuple , __UpperCamelCase : list[int] )->None:
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = [0] * len_array
if len_array > 0:
_UpperCAmelCase = array[0]
for i in range(1 , __UpperCamelCase ):
_UpperCAmelCase = self.prefix_sum[i - 1] + array[i]
def lowercase__ ( self : Any , __UpperCamelCase : int , __UpperCamelCase : int )->int:
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowercase__ ( self : List[Any] , __UpperCamelCase : int )->bool:
_UpperCAmelCase = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(__UpperCamelCase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
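A named, runnable re-sketch of the prefix-sum structure above, exposing the same three operations (O(n) build, O(1) range sum, and the set-based subarray-sum check), plus a short usage example:

```python
class PrefixSum:
    def __init__(self, array):
        self.prefix_sum = [0] * len(array)
        if array:
            self.prefix_sum[0] = array[0]
            for i in range(1, len(array)):
                self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        # inclusive range sum in O(1)
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        # does any contiguous subarray sum to target_sum?
        sums = {0}
        for s in self.prefix_sum:
            if s - target_sum in sums:
                return True
            sums.add(s)
        return False

ps = PrefixSum([1, 2, 3, 4])
print(ps.get_sum(1, 3))    # 9 == 2 + 3 + 4
print(ps.contains_sum(5))  # True: [2, 3]
```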
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _a ( UpperCAmelCase , UpperCAmelCase=None ) -> int:
"""simple docstring"""
lowerCamelCase__ : Any = None
if token is not None:
lowerCamelCase__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
lowerCamelCase__ : str = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
lowerCamelCase__ : Tuple = requests.get(UpperCAmelCase__ , headers=UpperCAmelCase__ ).json()
lowerCamelCase__ : List[str] = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
lowerCamelCase__ : List[str] = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(UpperCAmelCase__ ):
lowerCamelCase__ : Union[str, Any] = requests.get(url + f"&page={i + 2}" , headers=UpperCAmelCase__ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _a ( UpperCAmelCase , UpperCAmelCase=None ) -> int:
"""simple docstring"""
lowerCamelCase__ : List[Any] = None
if token is not None:
lowerCamelCase__ : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
lowerCamelCase__ : List[Any] = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
lowerCamelCase__ : List[Any] = requests.get(UpperCAmelCase__ , headers=UpperCAmelCase__ ).json()
lowerCamelCase__ : str = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
lowerCamelCase__ : Any = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(UpperCAmelCase__ ):
lowerCamelCase__ : Any = requests.get(url + f"&page={i + 2}" , headers=UpperCAmelCase__ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : str = None
if token is not None:
lowerCamelCase__ : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f"Bearer {token}"}
lowerCamelCase__ : Dict = requests.get(UpperCAmelCase__ , headers=UpperCAmelCase__ , allow_redirects=UpperCAmelCase__ )
lowerCamelCase__ : List[str] = result.headers["""Location"""]
lowerCamelCase__ : Tuple = requests.get(UpperCAmelCase__ , allow_redirects=UpperCAmelCase__ )
lowerCamelCase__ : Optional[Any] = os.path.join(UpperCAmelCase__ , f"{artifact_name}.zip" )
with open(UpperCAmelCase__ , '''wb''' ) as fp:
fp.write(response.content )
def _a ( UpperCAmelCase , UpperCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : int = []
lowerCamelCase__ : List[Any] = []
lowerCamelCase__ : Tuple = None
with zipfile.ZipFile(UpperCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCAmelCase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCAmelCase__ ) as f:
for line in f:
lowerCamelCase__ : List[str] = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowerCamelCase__ : List[str] = line[: line.index(''': ''' )]
lowerCamelCase__ : int = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
lowerCamelCase__ : int = line[len('''FAILED ''' ) :]
failed_tests.append(UpperCAmelCase__ )
elif filename == "job_name.txt":
lowerCamelCase__ : List[Any] = line
if len(UpperCAmelCase__ ) != len(UpperCAmelCase__ ):
raise ValueError(
f"`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCAmelCase__ )} for `errors` "
f"and {len(UpperCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
''' problem.''' )
lowerCamelCase__ : Optional[int] = None
if job_name and job_links:
lowerCamelCase__ : Tuple = job_links.get(UpperCAmelCase__ , UpperCAmelCase__ )
# A list with elements of the form (line of error, error, failed test)
lowerCamelCase__ : Tuple = [x + [y] + [job_link] for x, y in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
return result
def _a ( UpperCAmelCase , UpperCAmelCase=None ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : List[str] = [os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) for p in os.listdir(UpperCAmelCase__ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCAmelCase__ , job_links=UpperCAmelCase__ ) )
return errors
def _a ( UpperCAmelCase , UpperCAmelCase=None ) -> int:
"""simple docstring"""
lowerCamelCase__ : Any = Counter()
counter.update([x[1] for x in logs] )
lowerCamelCase__ : Tuple = counter.most_common()
lowerCamelCase__ : Dict = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowerCamelCase__ : Any = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowerCamelCase__ : Union[str, Any] = dict(sorted(r.items() , key=lambda UpperCAmelCase : item[1]["count"] , reverse=UpperCAmelCase__ ) )
return r
def _a ( UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
lowerCamelCase__ : Optional[Any] = test.split('''/''' )[2]
else:
lowerCamelCase__ : int = None
return test
def _a ( UpperCAmelCase , UpperCAmelCase=None ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowerCamelCase__ : Optional[Any] = [x for x in logs if x[2] is not None]
lowerCamelCase__ : str = {x[2] for x in logs}
lowerCamelCase__ : Optional[Any] = {}
for test in tests:
lowerCamelCase__ : Optional[int] = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowerCamelCase__ : Dict = counter.most_common()
lowerCamelCase__ : List[str] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowerCamelCase__ : List[Any] = sum(error_counts.values() )
if n_errors > 0:
lowerCamelCase__ : int = {"""count""": n_errors, """errors""": error_counts}
lowerCamelCase__ : List[Any] = dict(sorted(r.items() , key=lambda UpperCAmelCase : item[1]["count"] , reverse=UpperCAmelCase__ ) )
return r
def _a ( UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : List[Any] = """| no. | error | status |"""
lowerCamelCase__ : Any = """|-:|:-|:-|"""
lowerCamelCase__ : Any = [header, sep]
for error in reduced_by_error:
lowerCamelCase__ : Optional[Any] = reduced_by_error[error]["""count"""]
lowerCamelCase__ : List[Any] = f"| {count} | {error[:100]} | |"
lines.append(UpperCAmelCase__ )
return "\n".join(UpperCAmelCase__ )
def _a ( UpperCAmelCase ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = """| model | no. of errors | major error | count |"""
lowerCamelCase__ : Optional[int] = """|-:|-:|-:|-:|"""
lowerCamelCase__ : Optional[int] = [header, sep]
for model in reduced_by_model:
lowerCamelCase__ : Dict = reduced_by_model[model]["""count"""]
lowerCamelCase__ : str = list(reduced_by_model[model]['''errors'''].items() )[0]
lowerCamelCase__ : Optional[int] = f"| {model} | {count} | {error[:60]} | {_count} |"
lines.append(UpperCAmelCase__ )
return "\n".join(UpperCAmelCase__ )
if __name__ == "__main__":
_A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
parser.add_argument(
'--output_dir',
type=str,
required=True,
help='Where to store the downloaded artifacts and other result files.',
)
parser.add_argument('--token', default=None, type=str, help='A token that has actions:read permission.')
_A : List[str] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_A : Tuple = get_job_links(args.workflow_run_id, token=args.token)
_A : Any = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_A : Tuple = k.find(' / ')
_A : int = k[index + len(' / ') :]
_A : Optional[Any] = v
with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_A : List[str] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_A : str = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_A : List[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_A : Any = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_A : Dict = reduce_by_error(errors)
_A : int = reduce_by_model(errors)
_A : str = make_github_table(reduced_by_error)
_A : int = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
fp.write(sa)
| 142 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Tuple = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''altclip_text_model'''
def __init__( self : Union[str, Any] , lowercase_ : str=250002 , lowercase_ : Union[str, Any]=1024 , lowercase_ : Any=24 , lowercase_ : Union[str, Any]=16 , lowercase_ : Any=4096 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Union[str, Any]=0.1 , lowercase_ : int=514 , lowercase_ : Union[str, Any]=1 , lowercase_ : List[str]=0.02 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=1E-05 , lowercase_ : List[str]=1 , lowercase_ : List[Any]=0 , lowercase_ : Dict=2 , lowercase_ : Union[str, Any]="absolute" , lowercase_ : Any=True , lowercase_ : Union[str, Any]=768 , **lowercase_ : Any , ):
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : str = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : str = hidden_act
lowercase_ : List[str] = intermediate_size
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : str = max_position_embeddings
lowercase_ : List[str] = type_vocab_size
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[Any] = initializer_factor
lowercase_ : str = layer_norm_eps
lowercase_ : Tuple = position_embedding_type
lowercase_ : List[Any] = use_cache
lowercase_ : Tuple = project_dim
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''altclip_vision_model'''
def __init__( self : Dict , lowercase_ : Any=768 , lowercase_ : Dict=3072 , lowercase_ : Optional[Any]=512 , lowercase_ : Dict=12 , lowercase_ : Optional[int]=12 , lowercase_ : Optional[Any]=3 , lowercase_ : str=224 , lowercase_ : List[Any]=32 , lowercase_ : Union[str, Any]="quick_gelu" , lowercase_ : Dict=1E-5 , lowercase_ : Optional[int]=0.0 , lowercase_ : Optional[Any]=0.02 , lowercase_ : Optional[Any]=1.0 , **lowercase_ : Dict , ):
super().__init__(**lowercase_ )
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : Optional[Any] = projection_dim
lowercase_ : Tuple = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Any = num_channels
lowercase_ : Any = patch_size
lowercase_ : Dict = image_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : str = initializer_factor
lowercase_ : Any = attention_dropout
lowercase_ : Optional[int] = layer_norm_eps
lowercase_ : int = hidden_act
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[int] , lowercase_ : Union[str, os.PathLike] , **lowercase_ : Any ):
cls._set_token_in_kwargs(lowercase_ )
lowercase_ , lowercase_ : str = cls.get_config_dict(lowercase_ , **lowercase_ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
lowercase_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(lowercase_ , **lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''altclip'''
UpperCamelCase__ = True
def __init__( self : Optional[int] , lowercase_ : Dict=None , lowercase_ : List[Any]=None , lowercase_ : Tuple=768 , lowercase_ : List[str]=2.65_92 , **lowercase_ : List[Any] ):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
lowercase_ : Dict = kwargs.pop("""text_config_dict""" , lowercase_ )
lowercase_ : str = kwargs.pop("""vision_config_dict""" , lowercase_ )
super().__init__(**lowercase_ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowercase_ : Dict = {}
# This is the complete result when using `text_config_dict`.
lowercase_ : List[str] = AltCLIPTextConfig(**lowercase_ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowercase_ : Optional[Any] = (
f'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. '''
f'''The value `text_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowercase_ : Tuple = (
f'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '''
f'''value `text_config["{key}"]` will be overriden.'''
)
logger.warning(lowercase_ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowercase_ : int = {}
# This is the complete result when using `vision_config_dict`.
lowercase_ : List[str] = AltCLIPVisionConfig(**lowercase_ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowercase_ : List[str] = {
str(lowercase_ ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowercase_ : Any = (
f'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different '''
f'''values. The value `vision_config_dict["{key}"]` will be used instead.'''
)
# If inferred from default argument values (just to be super careful)
else:
lowercase_ : List[str] = (
f'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '''
f'''The value `vision_config["{key}"]` will be overriden.'''
)
logger.warning(lowercase_ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowercase_ : int = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
lowercase_ : Optional[int] = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
lowercase_ : Optional[int] = AltCLIPTextConfig(**lowercase_ )
lowercase_ : Any = AltCLIPVisionConfig(**lowercase_ )
lowercase_ : List[Any] = projection_dim
lowercase_ : Optional[Any] = logit_scale_init_value
lowercase_ : int = 1.0
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Dict , lowercase_ : AltCLIPTextConfig , lowercase_ : AltCLIPVisionConfig , **lowercase_ : Optional[int] ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
lowercase_ : Optional[int] = self.text_config.to_dict()
lowercase_ : Any = self.vision_config.to_dict()
lowercase_ : List[str] = self.__class__.model_type
return output
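# --- Added illustrative sketch (not part of the original file) ---
# A minimal, self-contained sketch of the merge-with-warning strategy used in the
# config `__init__` above: values from an explicit `*_config_dict` win over the
# plain `*_config`, and a warning is emitted for every conflicting key. All names
# below are hypothetical; only the merge logic mirrors the code above.
import logging

_merge_logger = logging.getLogger("config_merge_sketch")

def merge_config_with_warnings(config: dict, config_dict: dict) -> dict:
    merged = dict(config)
    for key, value in config_dict.items():
        if key in merged and merged[key] != value and key != "transformers_version":
            _merge_logger.warning(
                "`%s` is found in both dicts with different values; "
                "the value from `config_dict` will be used instead.",
                key,
            )
        merged[key] = value
    return merged

# e.g. merge_config_with_warnings({"projection_dim": 768}, {"projection_dim": 512})
# returns {"projection_dim": 512} and logs one warning.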
| 239 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCamelCase = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ['''LayoutLMv3FeatureExtractor''']
_UpperCamelCase = ['''LayoutLMv3ImageProcessor''']
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
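# --- Added illustrative sketch (not part of the original file) ---
# A rough sketch of the lazy-import idea behind `_LazyModule`, using only the
# standard library (PEP 562 module-level `__getattr__`). The real transformers
# implementation is more involved; the mapping below is hypothetical.
import importlib

_SKETCH_IMPORT_STRUCTURE = {"json": ["dumps", "loads"]}

def __getattr__(name):
    # Resolve the backing submodule only on first attribute access.
    for module_name, symbols in _SKETCH_IMPORT_STRUCTURE.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")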
| 353 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class _A ( unittest.TestCase ):
_SCREAMING_SNAKE_CASE : List[str] = ViTImageProcessor if is_vision_available() else None
@property
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : str = (3, 32, 128)
__UpperCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase : Any = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__UpperCAmelCase : Optional[int] = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + """\n""" )
__UpperCAmelCase : List[Any] = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
__UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , __UpperCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
def __A ( self , **__UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __A ( self , **__UpperCAmelCase ) -> List[str]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase )
def __A ( self ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__UpperCAmelCase : Dict = Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) )
return image_input
def __A ( self ) -> str:
'''simple docstring'''
__UpperCAmelCase : str = self.get_tokenizer()
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : Optional[Any] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : Dict = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__UpperCAmelCase : Union[str, Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
__UpperCAmelCase : List[Any] = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : Tuple = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : List[str] = self.prepare_image_inputs()
__UpperCAmelCase : str = image_processor(__UpperCAmelCase , return_tensors="""np""" )
__UpperCAmelCase : int = processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __A ( self ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase : Tuple = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : int = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : Dict = """test"""
__UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase )
__UpperCAmelCase : Optional[Any] = tokenizer(__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.get_image_processor()
__UpperCAmelCase : Tuple = self.get_tokenizer()
__UpperCAmelCase : Optional[int] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : List[Any] = """test"""
__UpperCAmelCase : int = self.prepare_image_inputs()
__UpperCAmelCase : Tuple = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.get_image_processor()
__UpperCAmelCase : List[Any] = self.get_tokenizer()
__UpperCAmelCase : List[str] = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase : Optional[Any] = processor.char_decode(__UpperCAmelCase )
__UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(__UpperCAmelCase )
__UpperCAmelCase : int = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.get_image_processor()
__UpperCAmelCase : Optional[Any] = self.get_tokenizer()
__UpperCAmelCase : Any = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : str = None
__UpperCAmelCase : Dict = self.prepare_image_inputs()
__UpperCAmelCase : Union[str, Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : Any = self.get_image_processor()
__UpperCAmelCase : List[str] = self.get_tokenizer()
__UpperCAmelCase : str = MgpstrProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
__UpperCAmelCase : Tuple = torch.randn(1 , 27 , 38 )
__UpperCAmelCase : Union[str, Any] = torch.randn(1 , 27 , 50_257 )
__UpperCAmelCase : Any = torch.randn(1 , 27 , 30_522 )
__UpperCAmelCase : Tuple = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 16 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :List[Any] ) -> List[str]:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCAmelCase = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowercase_ )
def UpperCAmelCase__ ( self :Any ) -> List[str]:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self :Dict ) -> Any:
UpperCAmelCase = 'sgugger/tiny-distilbert-classification'
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , only_pretrain_model=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self :str ) -> List[Any]:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , torchscript=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def UpperCAmelCase__ ( self :str ) -> List[str]:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , fpaa=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[Any]:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
# set architectures equal to `None`
UpperCAmelCase = None
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self :int ) -> List[Any]:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def UpperCAmelCase__ ( self :Optional[int] ) -> str:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowercase_ , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self :str ) -> int:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self :Union[str, Any] ) -> List[str]:
UpperCAmelCase = 'sshleifer/tinier_bart'
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def UpperCAmelCase__ ( self :Optional[int] ) -> str:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self :List[Any] ) -> List[str]:
UpperCAmelCase = 'sshleifer/tinier_bart'
UpperCAmelCase = AutoConfig.from_pretrained(lowercase_ )
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ , configs=[config] )
UpperCAmelCase = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def UpperCAmelCase__ ( self :Optional[int] ) -> Union[str, Any]:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , save_to_csv=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowercase_ , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(lowercase_ , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(lowercase_ , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(lowercase_ , 'train_time.csv' ) , env_info_csv_file=os.path.join(lowercase_ , 'env.csv' ) , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase_ , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_ , 'env.csv' ) ).exists() )
def UpperCAmelCase__ ( self :Dict ) -> Tuple:
UpperCAmelCase = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase_ :int ):
self.assertTrue(hasattr(lowercase_ , 'sequential' ) )
self.assertTrue(hasattr(lowercase_ , 'cumulative' ) )
self.assertTrue(hasattr(lowercase_ , 'current' ) )
self.assertTrue(hasattr(lowercase_ , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowercase_ , inference=lowercase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowercase_ , 'log.txt' ) , log_print=lowercase_ , trace_memory_line_by_line=lowercase_ , multi_process=lowercase_ , )
UpperCAmelCase = PyTorchBenchmark(lowercase_ )
UpperCAmelCase = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase_ , 'log.txt' ) ).exists() )
| 78 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ ):
UpperCAmelCase = x
UpperCAmelCase = y
for step in range(lowercase_ ): # noqa: B007
UpperCAmelCase = a * a - b * b + x
UpperCAmelCase = 2 * a * b + y
UpperCAmelCase = a_new
        # divergence happens for all complex numbers whose absolute value is
        # greater than 2, i.e. when the squared magnitude a*a + b*b exceeds 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
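# --- Added illustrative sketch (not part of the original file) ---
# A self-contained re-statement of the escape-time function above (its parameter
# bindings are obfuscated in this dump), with two sanity checks:
def _escape_time_sketch(x: float, y: float, max_step: int) -> float:
    a, b = x, y
    for step in range(max_step):  # noqa: B007
        a, b = a * a - b * b + x, 2 * a * b + y
        if a * a + b * b > 4:  # |z| > 2 guarantees divergence
            break
    return step / (max_step - 1)

assert _escape_time_sketch(0.0, 0.0, 50) == 1.0  # the origin never diverges
assert _escape_time_sketch(2.0, 2.0, 50) == 0.0  # far points diverge immediately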
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def _lowerCAmelCase ( lowercase_ ):
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(lowercase_ , 1 , 1 ) )
def _lowerCAmelCase ( lowercase_ = 800 , lowercase_ = 600 , lowercase_ = -0.6 , lowercase_ = 0 , lowercase_ = 3.2 , lowercase_ = 50 , lowercase_ = True , ):
UpperCAmelCase = Image.new('RGB' , (image_width, image_height) )
UpperCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(lowercase_ ):
for image_y in range(lowercase_ ):
# determine the figure-coordinates based on the image-coordinates
UpperCAmelCase = figure_width / image_width * image_height
UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCAmelCase = get_distance(lowercase_ , lowercase_ , lowercase_ )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCAmelCase = get_color_coded_rgb(lowercase_ )
else:
UpperCAmelCase = get_black_and_white_rgb(lowercase_ )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
snake_case_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 78 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = '''fnet'''
def __init__( self : str , lowerCAmelCase__ : Dict=3_2_0_0_0 , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : List[str]=1_2 , lowerCAmelCase__ : Any=3_0_7_2 , lowerCAmelCase__ : Union[str, Any]="gelu_new" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : int=5_1_2 , lowerCAmelCase__ : str=4 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : str=1e-12 , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Any=5_1_2 , lowerCAmelCase__ : List[Any]=3 , lowerCAmelCase__ : List[Any]=1 , lowerCAmelCase__ : Tuple=2 , **lowerCAmelCase__ : Any , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Dict = hidden_dropout_prob
_UpperCAmelCase : Dict = initializer_range
_UpperCAmelCase : Optional[Any] = type_vocab_size
_UpperCAmelCase : List[Any] = layer_norm_eps
_UpperCAmelCase : Any = use_tpu_fourier_optimizations
        _UpperCAmelCase : Tuple = tpu_short_seq_length
| 17 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __UpperCAmelCase ( a_: str ):
for param in module.parameters():
_UpperCAmelCase : Any = False
def __UpperCAmelCase ( ):
_UpperCAmelCase : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_UpperCAmelCase : int = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def __UpperCAmelCase ( a_: Optional[Any] ):
_UpperCAmelCase : int = plt.imshow(a_ )
fig.axes.get_xaxis().set_visible(a_ )
fig.axes.get_yaxis().set_visible(a_ )
plt.show()
def __UpperCAmelCase ( ):
_UpperCAmelCase : Dict = datetime.now()
_UpperCAmelCase : List[str] = current_time.strftime("%H:%M:%S" )
    return timestamp
| 17 | 1 |
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE :Tuple = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def UpperCAmelCase ( a_ = "dhaka" , a_ = 5 ) -> int:
"""simple docstring"""
__A = min(a_ , 5_0 ) # Prevent abuse!
__A = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
__A = requests.get("https://www.google.com/search" , params=a_ , headers=a_ )
__A = BeautifulSoup(html.text , "html.parser" )
__A = "".join(
re.findall(r"AF_initDataCallback\(([^<]+)\);" , str(soup.select("script" ) ) ) )
__A = json.dumps(a_ )
__A = json.loads(a_ )
__A = re.findall(
r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\"," , a_ , )
if not matched_google_image_data:
return 0
__A = re.sub(
r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]" , "" , str(a_ ) , )
__A = re.findall(
r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]" , a_ , )
for index, fixed_full_res_image in enumerate(a_ ):
if index >= max_images:
return index
__A = bytes(a_ , "ascii" ).decode(
"unicode-escape" )
__A = bytes(a_ , "ascii" ).decode(
"unicode-escape" )
__A = urllib.request.build_opener()
__A = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(a_ )
__A = F'''query_{query.replace(' ' , '_' )}'''
if not os.path.exists(a_ ):
os.makedirs(a_ )
urllib.request.urlretrieve( # noqa: S310
a_ , F'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
SCREAMING_SNAKE_CASE :Dict = download_images_from_google_query(sys.argv[1])
print(f'''{image_count} images were downloaded to disk.''')
except IndexError:
print('Please provide a search term.')
raise
| 15 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE :str = 'RegNetConfig'
# Base docstring
SCREAMING_SNAKE_CASE :List[str] = 'facebook/regnet-y-040'
SCREAMING_SNAKE_CASE :Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE :Optional[int] = 'facebook/regnet-y-040'
SCREAMING_SNAKE_CASE :Any = 'tabby, tabby cat'
SCREAMING_SNAKE_CASE :Optional[int] = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ,A : int ,A : int = 3 ,A : int = 1 ,A : int = 1 ,A : Optional[str] = "relu" ,**A : Dict ,):
super().__init__(**A )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
__A = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
__A = tf.keras.layers.ConvaD(
filters=A ,kernel_size=A ,strides=A ,padding="VALID" ,groups=A ,use_bias=A ,name="convolution" ,)
__A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
__A = ACTaFN[activation] if activation is not None else tf.identity
def UpperCamelCase_ ( self : List[Any] ,A : Any ):
__A = self.convolution(self.padding(A ) )
__A = self.normalization(A )
__A = self.activation(A )
return hidden_state
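# --- Added illustrative sketch (not part of the original file) ---
# De-obfuscated sketch of the layer above. In this dump, digits in identifiers
# are replaced with `a` (`ConvaD` is `Conv2D`, `ZeroPaddingaD` is `ZeroPadding2D`,
# `ACTaFN` is the `ACT2FN` activation map). The pipeline is: pad -> grouped conv
# -> batch norm -> activation. Assumes TensorFlow is available, as in this file.
import tensorflow as tf

def conv_block(x, filters, kernel_size=3, stride=1, groups=1):
    x = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)(x)
    x = tf.keras.layers.Conv2D(
        filters, kernel_size, strides=stride, padding="valid", groups=groups, use_bias=False
    )(x)
    x = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9)(x)
    return tf.keras.activations.relu(x)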
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Tuple ,A : RegNetConfig ,**A : str ):
super().__init__(**A )
__A = config.num_channels
__A = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="embedder" ,)
def UpperCamelCase_ ( self : Tuple ,A : Optional[Any] ):
__A = shape_list(A )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
__A = tf.transpose(A ,perm=(0, 2, 3, 1) )
__A = self.embedder(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Optional[int] ,A : int ,A : int = 2 ,**A : Tuple ):
super().__init__(**A )
__A = tf.keras.layers.ConvaD(
filters=A ,kernel_size=1 ,strides=A ,use_bias=A ,name="convolution" )
__A = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name="normalization" )
def UpperCamelCase_ ( self : Union[str, Any] ,A : tf.Tensor ,A : bool = False ):
return self.normalization(self.convolution(A ) ,training=A )
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Dict ,A : int ,A : int ,**A : str ):
super().__init__(**A )
__A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" )
__A = [
tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="relu" ,name="attention.0" ),
tf.keras.layers.ConvaD(filters=A ,kernel_size=1 ,activation="sigmoid" ,name="attention.2" ),
]
def UpperCamelCase_ ( self : Dict ,A : List[Any] ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
__A = self.pooler(A )
for layer_module in self.attention:
__A = layer_module(A )
__A = hidden_state * pooled
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : Optional[int] ):
super().__init__(**A )
__A = in_channels != out_channels or stride != 1
__A = max(1 ,out_channels // config.groups_width )
__A = (
TFRegNetShortCut(A ,stride=A ,name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" ,name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
__A = [
TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
TFRegNetConvLayer(
A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ),
TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.2" ),
]
__A = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : int ,A : Optional[int] ):
__A = hidden_state
for layer_module in self.layers:
__A = layer_module(A )
__A = self.shortcut(A )
hidden_state += residual
__A = self.activation(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[Any] ,A : RegNetConfig ,A : int ,A : int ,A : int = 1 ,**A : str ):
super().__init__(**A )
__A = in_channels != out_channels or stride != 1
__A = max(1 ,out_channels // config.groups_width )
__A = (
TFRegNetShortCut(A ,stride=A ,name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" ,name="shortcut" )
)
__A = [
TFRegNetConvLayer(A ,kernel_size=1 ,activation=config.hidden_act ,name="layer.0" ),
TFRegNetConvLayer(
A ,stride=A ,groups=A ,activation=config.hidden_act ,name="layer.1" ),
TFRegNetSELayer(A ,reduced_channels=int(round(in_channels / 4 ) ) ,name="layer.2" ),
TFRegNetConvLayer(A ,kernel_size=1 ,activation=A ,name="layer.3" ),
]
__A = ACTaFN[config.hidden_act]
def UpperCamelCase_ ( self : Dict ,A : Any ):
__A = hidden_state
for layer_module in self.layers:
__A = layer_module(A )
__A = self.shortcut(A )
hidden_state += residual
__A = self.activation(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : List[str] ,A : RegNetConfig ,A : int ,A : int ,A : int = 2 ,A : int = 2 ,**A : Optional[int] ):
super().__init__(**A )
__A = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
__A = [
# downsampling is done in the first layer with stride of 2
layer(A ,A ,A ,stride=A ,name="layers.0" ),
*[layer(A ,A ,A ,name=f'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCamelCase_ ( self : Any ,A : List[str] ):
for layer_module in self.layers:
__A = layer_module(A )
return hidden_state
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Any ,A : RegNetConfig ,**A : List[str] ):
super().__init__(**A )
__A = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="stages.0" ,) )
__A = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A ,A ,A ,depth=A ,name=f'''stages.{i+1}''' ) )
def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor ,A : bool = False ,A : bool = True ):
__A = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__A = hidden_states + (hidden_state,)
__A = stage_module(A )
if output_hidden_states:
__A = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A ,hidden_states=A )
@keras_serializable
class UpperCAmelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
snake_case_ = RegNetConfig
def __init__( self : int ,A : Optional[int] ,**A : Dict ):
super().__init__(**A )
__A = config
__A = TFRegNetEmbeddings(A ,name="embedder" )
__A = TFRegNetEncoder(A ,name="encoder" )
__A = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A ,name="pooler" )
@unpack_inputs
def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : bool = False ,):
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.embedder(A ,training=A )
__A = self.encoder(
A ,output_hidden_states=A ,return_dict=A ,training=A )
__A = encoder_outputs[0]
__A = self.pooler(A )
        # Change to NCHW output format to have uniformity across the modules
__A = tf.transpose(A ,perm=(0, 3, 1, 2) )
__A = tf.transpose(A ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
__A = tuple([tf.transpose(A ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A ,pooler_output=A ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
snake_case_ = RegNetConfig
snake_case_ = "regnet"
snake_case_ = "pixel_values"
@property
def UpperCamelCase_ ( self : Optional[Any] ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_24, 2_24) ,dtype=tf.floataa )}
SCREAMING_SNAKE_CASE :Dict = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
SCREAMING_SNAKE_CASE :Dict = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , __SCREAMING_SNAKE_CASE , )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,A : RegNetConfig ,*A : List[Any] ,**A : str ):
super().__init__(A ,*A ,**A )
__A = TFRegNetMainLayer(A ,name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=A ,config_class=_CONFIG_FOR_DOC ,modality="vision" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def UpperCamelCase_ ( self : Tuple ,A : tf.Tensor ,A : Optional[bool] = None ,A : Optional[bool] = None ,A : int=False ,):
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.regnet(
pixel_values=A ,output_hidden_states=A ,return_dict=A ,training=A ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , __SCREAMING_SNAKE_CASE , )
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] ,A : RegNetConfig ,*A : str ,**A : Tuple ):
super().__init__(A ,*A ,**A )
__A = config.num_labels
__A = TFRegNetMainLayer(A ,name="regnet" )
# classification head
__A = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=A ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def UpperCamelCase_ ( self : List[str] ,A : tf.Tensor = None ,A : tf.Tensor = None ,A : bool = None ,A : bool = None ,A : Union[str, Any]=False ,):
__A = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__A = return_dict if return_dict is not None else self.config.use_return_dict
__A = self.regnet(
A ,output_hidden_states=A ,return_dict=A ,training=A )
__A = outputs.pooler_output if return_dict else outputs[1]
__A = self.classifier[0](A )
__A = self.classifier[1](A )
__A = None if labels is None else self.hf_compute_loss(labels=A ,logits=A )
if not return_dict:
__A = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A ,logits=A ,hidden_states=outputs.hidden_states )
| 15 | 1 |
def _snake_case ( lowerCamelCase__ : str ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError("String must only contain alphabetic characters." )
lowerCamelCase_ : Any =sorted(string.lower() )
return len(lowerCamelCase__ ) == len(set(lowerCamelCase__ ) )
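# --- Added illustrative usage (not part of the original file) ---
# The function name is obfuscated to `_snake_case` in this dump; with that caveat:
assert _snake_case("Uncopyrightable") is True   # no repeated letters
assert _snake_case("allowance") is False        # 'a' and 'l' repeat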
if __name__ == "__main__":
A__ : int = input('Enter a string ').strip()
A__ : Dict = is_isogram(input_str)
print(f'{input_str} is {"an" if isogram else "not an"} isogram.')
| 355 |
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def _snake_case ( lowerCamelCase__ : Any ) -> Union[str, Any]: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def _snake_case ( ) -> List[Any]:
with parallel_backend("spark" ):
assert ParallelBackendConfig.backend_name == "spark"
lowerCamelCase_ : Optional[Any] =[1, 2, 3]
with pytest.raises(lowerCamelCase__ ):
with parallel_backend("unsupported backend" ):
map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=2 )
with pytest.raises(lowerCamelCase__ ):
with parallel_backend("unsupported backend" ):
map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" , [2, -1] )
def _snake_case ( lowerCamelCase__ : Tuple ) -> Optional[Any]:
lowerCamelCase_ : str =[1, 2]
lowerCamelCase_ : List[str] ={"a": 1, "b": 2}
lowerCamelCase_ : List[str] ={"a": [1, 2], "b": [3, 4]}
lowerCamelCase_ : Optional[int] ={"a": {"1": 1}, "b": 2}
lowerCamelCase_ : int ={"a": 1, "b": 2, "c": 3, "d": 4}
lowerCamelCase_ : Optional[int] =[2, 3]
lowerCamelCase_ : List[Any] ={"a": 2, "b": 3}
lowerCamelCase_ : int ={"a": [2, 3], "b": [4, 5]}
lowerCamelCase_ : str ={"a": {"1": 2}, "b": 3}
lowerCamelCase_ : Dict ={"a": 2, "b": 3, "c": 4, "d": 5}
with parallel_backend("spark" ):
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
assert map_nested(lowerCamelCase__ , lowerCamelCase__ , num_proc=lowerCamelCase__ ) == expected_map_nested_sa
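# --- Added illustrative sketch (not part of the original file) ---
# `map_nested` applies a function to every leaf of a nested structure; a minimal
# re-implementation (ignoring the `num_proc` parallelism) looks like this:
def _map_nested_sketch(fn, data):
    if isinstance(data, dict):
        return {k: _map_nested_sketch(fn, v) for k, v in data.items()}
    if isinstance(data, (list, tuple)):
        return type(data)(_map_nested_sketch(fn, v) for v in data)
    return fn(data)

assert _map_nested_sketch(lambda i: i + 1, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}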
| 209 | 0 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_lowercase : Union[str, Any] = get_tests_dir('fixtures')
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any ):
        # A mock response for an HTTP head request, to emulate the server being down
__UpperCAmelCase = mock.Mock()
__UpperCAmelCase = 5_00
__UpperCAmelCase = {}
__UpperCAmelCase = HTTPError
__UpperCAmelCase = {}
# Download this model to make sure it's in the cache.
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=_lowercase ) as mock_head:
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def a ( self : Optional[int] ):
# This test is for deprecated behavior and can be removed in v5
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class _UpperCAmelCase ( unittest.TestCase ):
@classmethod
def a ( cls : Optional[int] ):
__UpperCAmelCase = TOKEN
HfFolder.save_token(_lowercase )
@classmethod
def a ( cls : Tuple ):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def a ( self : Optional[int] ):
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_lowercase )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowercase , repo_id='''test-feature-extractor''' , push_to_hub=_lowercase , use_auth_token=self._token )
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
def a ( self : int ):
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained(_lowercase )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_lowercase , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=_lowercase , use_auth_token=self._token )
__UpperCAmelCase = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) )
def a ( self : int ):
CustomFeatureExtractor.register_for_auto_class()
__UpperCAmelCase = CustomFeatureExtractor.from_pretrained(_lowercase )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
__UpperCAmelCase = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=_lowercase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
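# --- Added illustrative sketch (not part of the original file) ---
# The offline-failure pattern used in the first test above, in isolation: patch
# the transport call so it reports a 500, then assert on the canned response.
# (`requests` is assumed available, as in these tests.)
import unittest.mock as mock
import requests

with mock.patch("requests.Session.request") as mock_request:
    mock_request.return_value = mock.Mock(status_code=500, headers={})
    response = requests.Session().request("HEAD", "https://example.com")
assert response.status_code == 500
mock_request.assert_called_once()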
| 332 |
"""simple docstring"""
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def lowercase__ ( snake_case_ :ndarray ):
return np.dot(snake_case_ , snake_case_ )
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , *,
_lowercase : float = np.inf , _lowercase : str = "linear" , _lowercase : float = 0.0 , ):
__UpperCAmelCase = regularization
__UpperCAmelCase = gamma
if kernel == "linear":
__UpperCAmelCase = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
__UpperCAmelCase = self.__rbf
# in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
__UpperCAmelCase = F'''Unknown kernel: {kernel}'''
raise ValueError(_lowercase )
def a ( self : Dict , _lowercase : ndarray , _lowercase : ndarray ):
return np.dot(_lowercase , _lowercase )
def a ( self : Any , _lowercase : ndarray , _lowercase : ndarray ):
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def a ( self : Union[str, Any] , _lowercase : list[ndarray] , _lowercase : ndarray ):
__UpperCAmelCase = observations
__UpperCAmelCase = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((__UpperCAmelCase) , ) = np.shape(_lowercase )
def to_minimize(_lowercase : ndarray ) -> float:
__UpperCAmelCase = 0
((__UpperCAmelCase) , ) = np.shape(_lowercase )
for i in range(_lowercase ):
for j in range(_lowercase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(_lowercase )
__UpperCAmelCase = LinearConstraint(_lowercase , 0 , 0 )
__UpperCAmelCase = Bounds(0 , self.regularization )
__UpperCAmelCase = minimize(
_lowercase , np.ones(_lowercase ) , bounds=_lowercase , constraints=[ly_contraint] ).x
__UpperCAmelCase = l_star
# calculating mean offset of separation plane to points
__UpperCAmelCase = 0
for i in range(_lowercase ):
for j in range(_lowercase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
__UpperCAmelCase = s / n
def a ( self : List[Any] , _lowercase : ndarray ):
__UpperCAmelCase = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , _lowercase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
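# --- Added illustrative sketch (not part of the original file) ---
# The Wolfe dual described in the comments above, solved on a toy 1-D dataset.
# This is independent of the (obfuscated) class above and only mirrors the math:
# maximize sum_n(l_n) - 1/2 * sum_{n,m} l_n*l_m*y_n*y_m*(x_n . x_m)
# subject to 0 <= l_n <= C and sum_n(l_n * y_n) = 0, then w = sum_n(l_n*y_n*x_n).
import numpy as np
from scipy.optimize import Bounds, LinearConstraint, minimize

xs = np.array([[-2.0], [-1.0], [1.0], [2.0]])
ys = np.array([-1.0, -1.0, 1.0, 1.0])
gram = (xs @ xs.T) * np.outer(ys, ys)

def negative_dual(l: np.ndarray) -> float:
    return 0.5 * l @ gram @ l - l.sum()

l_star = minimize(
    negative_dual,
    np.ones(len(ys)),
    bounds=Bounds(0, 10.0),                    # 0 <= l_n <= C
    constraints=[LinearConstraint(ys, 0, 0)],  # sum_n(l_n * y_n) = 0
).x
w = (l_star * ys) @ xs
print(w)  # expected to be close to [1.0] for this separable toy data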
| 332 | 1 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase_ )
class _SCREAMING_SNAKE_CASE( lowerCamelCase_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
SCREAMING_SNAKE_CASE_ : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
SCREAMING_SNAKE_CASE_ : ClassVar[Features] = Features({'''labels''': ClassLabel} )
SCREAMING_SNAKE_CASE_ : str = "text"
SCREAMING_SNAKE_CASE_ : str = "labels"
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] ,__snake_case ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
__SCREAMING_SNAKE_CASE :int = copy.deepcopy(self )
__SCREAMING_SNAKE_CASE :str = self.label_schema.copy()
__SCREAMING_SNAKE_CASE :Optional[int] = features[self.label_column]
__SCREAMING_SNAKE_CASE :List[str] = label_schema
return task_template
@property
def _UpperCamelCase ( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
        }
| 371 |
"""simple docstring"""
def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[Any] ) -> Union[str, Any]:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def __lowerCamelCase ( a_ : Optional[int] , a_ : Any=0 ) -> Optional[Any]:
return sorted(a_ , key=lambda a_ : x[column] )
def __lowerCamelCase ( a_ : Optional[Any] , a_ : Optional[int] , a_ : str=float('''inf''' ) ) -> str:
for i in range(points_counts - 1 ):
for j in range(i + 1 , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :Optional[Any] = current_dis
return min_dis
def __lowerCamelCase ( a_ : List[Any] , a_ : Any , a_ : Optional[int]=float('''inf''' ) ) -> Optional[Any]:
for i in range(min(6 , points_counts - 1 ) , a_ ):
for j in range(max(0 , i - 6 ) , a_ ):
__SCREAMING_SNAKE_CASE :Dict = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
__SCREAMING_SNAKE_CASE :int = current_dis
return min_dis
def __lowerCamelCase ( a_ : str , a_ : List[Any] , a_ : int ) -> Optional[int]:
# base case
if points_counts <= 3:
return dis_between_closest_pair(a_ , a_ )
# recursion
__SCREAMING_SNAKE_CASE :int = points_counts // 2
__SCREAMING_SNAKE_CASE :Dict = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[:mid] , a_ )
__SCREAMING_SNAKE_CASE :Any = closest_pair_of_points_sqr(
a_ , points_sorted_on_y[mid:] , points_counts - mid )
__SCREAMING_SNAKE_CASE :Union[str, Any] = min(a_ , a_ )
__SCREAMING_SNAKE_CASE :str = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(a_ )
__SCREAMING_SNAKE_CASE :Dict = dis_between_closest_in_strip(
a_ , len(a_ ) , a_ )
return min(a_ , a_ )
def __lowerCamelCase ( a_ : int , a_ : Tuple ) -> List[Any]:
__SCREAMING_SNAKE_CASE :Union[str, Any] = column_based_sort(a_ , column=0 )
__SCREAMING_SNAKE_CASE :int = column_based_sort(a_ , column=1 )
return (
closest_pair_of_points_sqr(
a_ , a_ , a_ )
) ** 0.5
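# --- Added illustrative cross-check (not part of the original file) ---
# Brute force is O(n^2) but trivially correct, so it is a useful oracle for the
# divide-and-conquer routine above on small inputs:
def _closest_pair_brute_force(points):
    best = float("inf")
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            dx = points[i][0] - points[j][0]
            dy = points[i][1] - points[j][1]
            best = min(best, dx * dx + dy * dy)
    return best ** 0.5

assert _closest_pair_brute_force([(2, 3), (12, 30), (3, 4)]) == 2 ** 0.5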
if __name__ == "__main__":
lowerCamelCase_ = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
| 239 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
A__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 230 |
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def lowerCamelCase ( self ):
'''simple docstring'''
        config = self.get_config()
        config.vocab_size = 300
return config
def lowerCamelCase ( self ):
'''simple docstring'''
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__lowerCamelCase = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = True
__lowerCamelCase = MraModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = MraForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_labels
__lowerCamelCase = MraForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.num_choices
__lowerCamelCase = MraForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
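    # Shape sketch of the multiple-choice reshaping above (illustrative numbers, not
    # from this file): unsqueeze(1).expand(-1, num_choices, -1) turns a (batch, seq_len)
    # tensor into (batch, num_choices, seq_len), e.g. (2, 8) -> (2, 4, 8) for 4 choices,
    # so every candidate answer is scored against its own copy of the inputs.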
def lowerCamelCase ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
lowerCAmelCase__ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = ()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowerCamelCase = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = MraModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason='''MRA does not output attentions''' )
def lowerCamelCase ( self ):
'''simple docstring'''
return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowerCamelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[-0.0_140, 0.0_830, -0.0_381], [0.1_546, 0.1_402, 0.0_220], [0.1_162, 0.0_851, 0.0_165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
__lowerCamelCase = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = 50265
__lowerCamelCase = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[9.2_595, -3.6_038, 11.8_819], [9.3_869, -3.2_693, 11.0_956], [11.8_524, -3.4_938, 13.1_210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
__lowerCamelCase = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
__lowerCamelCase = model(__UpperCAmelCase )[0]
__lowerCamelCase = 50265
__lowerCamelCase = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__lowerCamelCase = torch.tensor(
[[[5.4_789, -2.3_564, 7.5_064], [7.9_067, -1.3_369, 9.9_668], [9.0_712, -1.8_106, 7.0_380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 330 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
def __init__( self : Any ,_UpperCAmelCase : int ,_UpperCAmelCase : str=13 ,_UpperCAmelCase : Union[str, Any]=10 ,_UpperCAmelCase : List[str]=3 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Any=True ,_UpperCAmelCase : Tuple=True ,_UpperCAmelCase : int=32 ,_UpperCAmelCase : str=5 ,_UpperCAmelCase : Any=4 ,_UpperCAmelCase : Dict=37 ,_UpperCAmelCase : List[Any]="gelu" ,_UpperCAmelCase : List[str]=0.1 ,_UpperCAmelCase : List[Any]=0.1 ,_UpperCAmelCase : Optional[int]=10 ,_UpperCAmelCase : List[Any]=0.02 ,_UpperCAmelCase : List[str]="divided_space_time" ,_UpperCAmelCase : List[str]=None ,):
_a : List[Any] = parent
_a : str = batch_size
_a : Tuple = image_size
_a : Any = num_channels
_a : Tuple = patch_size
_a : int = num_frames
_a : Dict = is_training
_a : Optional[Any] = use_labels
_a : List[Any] = hidden_size
_a : Any = num_hidden_layers
_a : Optional[Any] = num_attention_heads
_a : Optional[int] = intermediate_size
_a : Tuple = hidden_act
_a : Optional[Any] = hidden_dropout_prob
_a : Union[str, Any] = attention_probs_dropout_prob
_a : int = attention_type
_a : Optional[Any] = initializer_range
_a : Optional[int] = scope
_a : List[str] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
_a : Any = (image_size // patch_size) ** 2
_a : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1
def __lowercase ( self : Tuple ):
_a : List[str] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_a : str = None
if self.use_labels:
_a : List[Any] = ids_tensor([self.batch_size] ,self.num_labels )
_a : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Dict ):
_a : Tuple = TimesformerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,)
_a : Union[str, Any] = self.num_labels
return config
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any] ):
_a : List[Any] = TimesformerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Optional[Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ):
_a : Optional[int] = TimesformerForVideoClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : int = model(_UpperCAmelCase )
# verify the logits shape
_a : Any = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape ,_UpperCAmelCase )
def __lowercase ( self : int ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase : Tuple = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowerCAmelCase : List[str] = (
{'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : str = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : Dict = False
def __lowercase ( self : int ):
_a : List[str] = TimesformerModelTester(self )
_a : int = ConfigTester(
self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Tuple ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Any ,_UpperCAmelCase : List[str]=False ):
_a : Tuple = copy.deepcopy(_UpperCAmelCase )
if return_labels:
if model_class in get_values(_UpperCAmelCase ):
_a : Dict = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_UpperCAmelCase )
return inputs_dict
def __lowercase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def __lowercase ( self : Optional[Any] ):
pass
def __lowercase ( self : str ):
_a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Union[str, Any] = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase ,nn.Linear ) )
def __lowercase ( self : Any ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : List[str] = model_class(_UpperCAmelCase )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Union[str, Any] = [*signature.parameters.keys()]
_a : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Tuple ):
_a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[Any] ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = TimesformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ):
if not self.has_attentions:
pass
else:
_a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = True
for model_class in self.all_model_classes:
_a : List[str] = self.model_tester.seq_length
_a : str = self.model_tester.num_frames
_a : Union[str, Any] = True
_a : str = False
_a : Union[str, Any] = True
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Tuple = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a : Optional[int] = True
_a : Optional[int] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Tuple = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : int = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
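            # Worked numbers (illustrative, using this tester's defaults): image_size=10,
            # patch_size=2 -> 25 patches per frame; num_frames=2 -> seq_len = 2*25 + 1 = 51,
            # and 51 // 2 + 1 = 26 = patches-per-frame + 1, the attention matrix side above.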
_a : Any = len(_UpperCAmelCase )
# Check attention is always last and order is fine
_a : List[str] = True
_a : List[Any] = True
_a : Dict = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[str] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
self.assertEqual(out_len + 1 ,len(_UpperCAmelCase ) )
_a : List[str] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) ,self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,)
def __lowercase ( self : Union[str, Any] ):
def check_hidden_states_output(_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Tuple ):
_a : Optional[int] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : Dict = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : List[Any] = outputs.hidden_states
_a : Tuple = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_UpperCAmelCase ) ,_UpperCAmelCase )
_a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
_a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
@cached_property
def __lowercase ( self : int ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : List[str] ):
_a : Any = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
_UpperCAmelCase )
_a : Dict = self.default_image_processor
_a : Optional[Any] = prepare_video()
_a : Any = image_processor(video[:8] ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : str = model(**_UpperCAmelCase )
# verify the logits
_a : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : List[Any] = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
| 354 |
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
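# Worked example (illustrative): evaluate("banxna", "banana") finds 5 matching
# positions, so it returns ("banxna", 5.0).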
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
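# Worked example (illustrative): with random_slice = 3, crossover("banana", "ananas")
# returns ("bannas", "anaana") -- prefixes and suffixes swapped at the cut point.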
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str]) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #         max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
| 107 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
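# Worked example (illustrative) of the renaming above:
# "module.encoder.patch_embed1.proj.weight" -> "glpn.encoder.patch_embed1.proj.weight"
# -> "glpn.encoder.patch_embeddings.0.proj.weight"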
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
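# Shape sketch (illustrative): for hidden size H the fused "kv" projection is (2H, H);
# rows [:H] become the key weights and rows [H:] the value weights, mirroring the
# slicing above.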
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
_a = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 17 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
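# Minimal usage sketch (assumed, not part of the original file):
# config = TrOCRConfig(decoder_layers=6)
# assert config.num_hidden_layers == 6  # resolved through attribute_map above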
| 343 | 0 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
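# Minimal usage sketch (assumed, not part of the original file): three singleton
# sets of sizes [1, 1, 1]; two merges union everything, so max_set grows to 3.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1])
    ds.merge(0, 1)
    ds.merge(1, 2)
    assert ds.max_set == 3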
| 351 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=32, __magic_name__=3, __magic_name__=4, __magic_name__=[10, 20, 30, 40], __magic_name__=[2, 2, 3, 2], __magic_name__=True, __magic_name__=True, __magic_name__=37, __magic_name__="gelu", __magic_name__=10, __magic_name__=0.02, __magic_name__=["stage2", "stage3", "stage4"], __magic_name__=3, __magic_name__=None, ) -> str:
"""simple docstring"""
UpperCamelCase__ : List[Any] = parent
UpperCamelCase__ : Tuple = batch_size
UpperCamelCase__ : Tuple = image_size
UpperCamelCase__ : Optional[int] = num_channels
UpperCamelCase__ : int = num_stages
UpperCamelCase__ : Union[str, Any] = hidden_sizes
UpperCamelCase__ : str = depths
UpperCamelCase__ : str = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Union[str, Any] = intermediate_size
UpperCamelCase__ : Dict = hidden_act
UpperCamelCase__ : Optional[Any] = type_sequence_label_size
UpperCamelCase__ : List[str] = initializer_range
UpperCamelCase__ : str = out_features
UpperCamelCase__ : Union[str, Any] = num_labels
UpperCamelCase__ : Dict = scope
UpperCamelCase__ : List[str] = num_stages
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : Dict = None
if self.use_labels:
UpperCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase__ : Optional[int] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features, )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return UperNetConfig(
backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=__magic_name__, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=__magic_name__, loss_ignore_index=255, num_labels=self.num_labels, )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = UperNetForSemanticSegmentation(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
UpperCamelCase__ : Any = model(__magic_name__ )
self.parent.assertEqual(
result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
a : Union[str, Any] = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
a : List[str] = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
a : Union[str, Any] = False
a : Tuple = False
a : int = False
a : List[str] = False
a : Union[str, Any] = False
a : str = False
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = UperNetModelTester(self )
UpperCamelCase__ : List[str] = ConfigTester(self, config_class=__magic_name__, has_text_modality=__magic_name__, hidden_size=37 )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(__magic_name__ )
UpperCamelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : List[Any] = [*signature.parameters.keys()]
UpperCamelCase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __magic_name__ )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__magic_name__ )
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ ):
UpperCamelCase__ : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
UpperCamelCase__ : Optional[int] = model(**self._prepare_for_class(__magic_name__, __magic_name__ ) )
UpperCamelCase__ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ : Any = self.model_tester.num_stages
self.assertEqual(len(__magic_name__ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
UpperCamelCase__ ,UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = True
check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : str = True
check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Union[str, Any] = _config_zero_init(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = model_class(config=__magic_name__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", )
@unittest.skip(reason='''UperNet does not have tied weights''' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
@slow
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : int = UperNetForSemanticSegmentation.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg")
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = AutoImageProcessor.from_pretrained('''openmmlab/upernet-swin-tiny''' )
UpperCamelCase__ : Optional[int] = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-swin-tiny''' ).to(__magic_name__ )
UpperCamelCase__ : Any = prepare_img()
UpperCamelCase__ : List[Any] = processor(images=__magic_name__, return_tensors='''pt''' ).to(__magic_name__ )
with torch.no_grad():
UpperCamelCase__ : Optional[int] = model(**__magic_name__ )
UpperCamelCase__ : Tuple = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, __magic_name__ )
UpperCamelCase__ : int = torch.tensor(
[[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], __magic_name__, atol=1E-4 ) )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Any = AutoImageProcessor.from_pretrained('''openmmlab/upernet-convnext-tiny''' )
UpperCamelCase__ : Dict = UperNetForSemanticSegmentation.from_pretrained('''openmmlab/upernet-convnext-tiny''' ).to(__magic_name__ )
UpperCamelCase__ : str = prepare_img()
UpperCamelCase__ : int = processor(images=__magic_name__, return_tensors='''pt''' ).to(__magic_name__ )
with torch.no_grad():
UpperCamelCase__ : Dict = model(**__magic_name__ )
UpperCamelCase__ : Any = torch.Size((1, model.config.num_labels, 512, 512) )
self.assertEqual(outputs.logits.shape, __magic_name__ )
UpperCamelCase__ : Tuple = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], __magic_name__, atol=1E-4 ) )
| 247 | 0 |
'''simple docstring'''
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
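# Worked expansion (first terms): sylvester(1) = 2, sylvester(2) = 1 * 2 + 1 = 3,
# sylvester(3) = 2 * 3 + 1 = 7, sylvester(4) = 6 * 7 + 1 = 43 -- each term is
# (previous term - 1) * previous term + 1, matching Sylvester's sequence.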
if __name__ == "__main__":
print(f'The 8th number in Sylvester\'s sequence: {sylvester(8)}')
| 53 |
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"]) | 8 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__a: List[Any] = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
'''simple docstring'''
@staticmethod
def _lowerCAmelCase( __lowerCAmelCase , __lowerCAmelCase ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def _lowerCAmelCase( __lowerCAmelCase ) -> List[str]:
raise NotImplementedError
@staticmethod
def _lowerCAmelCase( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase="[CLS]" , __lowerCAmelCase=1 , __lowerCAmelCase="[SEP]" , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=0 , __lowerCAmelCase=0 , __lowerCAmelCase=-100 , __lowerCAmelCase=0 , __lowerCAmelCase=True , ) -> List[InputFeatures]:
lowercase__ : Tuple = {label: i for i, label in enumerate(__lowerCAmelCase )}
lowercase__ : List[Any] = []
for ex_index, example in enumerate(__lowerCAmelCase ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d of %d''' , __lowerCAmelCase , len(__lowerCAmelCase ) )
lowercase__ : str = []
lowercase__ : Dict = []
for word, label in zip(example.words , example.labels ):
lowercase__ : Tuple = tokenizer.tokenize(__lowerCAmelCase )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(__lowerCAmelCase ) > 0:
tokens.extend(__lowerCAmelCase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(__lowerCAmelCase ) - 1) )
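                # Worked example (illustrative): "jacksonville" labeled B-LOC and split into
                # ["jack", "##son", "##ville"] contributes
                # [label_map["B-LOC"], pad_token_label_id, pad_token_label_id],
                # so the loss is computed on the first sub-token only.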
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
lowercase__ : List[str] = tokenizer.num_special_tokens_to_add()
if len(__lowerCAmelCase ) > max_seq_length - special_tokens_count:
lowercase__ : str = tokens[: (max_seq_length - special_tokens_count)]
lowercase__ : Union[str, Any] = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
lowercase__ : Any = [sequence_a_segment_id] * len(__lowerCAmelCase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
lowercase__ : Tuple = [cls_token] + tokens
lowercase__ : int = [pad_token_label_id] + label_ids
lowercase__ : Dict = [cls_token_segment_id] + segment_ids
lowercase__ : str = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
lowercase__ : List[Any] = [1 if mask_padding_with_zero else 0] * len(__lowerCAmelCase )
# Zero-pad up to the sequence length.
lowercase__ : Optional[Any] = max_seq_length - len(__lowerCAmelCase )
if pad_on_left:
lowercase__ : int = ([pad_token] * padding_length) + input_ids
lowercase__ : List[str] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
lowercase__ : List[str] = ([pad_token_segment_id] * padding_length) + segment_ids
lowercase__ : Dict = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
assert len(__lowerCAmelCase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(__lowerCAmelCase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
lowercase__ : Tuple = None
features.append(
InputFeatures(
input_ids=__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , label_ids=__lowerCAmelCase ) )
return features
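
# ---------------------------------------------------------------------------
# Hedged illustration (added for clarity; not part of the original module):
# the key trick in convert_examples_to_features is that only the FIRST
# sub-token of each word keeps the real label id, while continuation
# sub-tokens get pad_token_label_id so the loss ignores them. The toy
# `subwords` mapping below is a stand-in for a real tokenizer.
def _demo_label_alignment() -> None:
    pad_token_label_id = -100
    label_map = {"O": 0, "B-LOC": 1}
    words = ["New", "York"]
    labels = ["B-LOC", "O"]
    subwords = {"New": ["New"], "York": ["Yo", "##rk"]}  # pretend word pieces

    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        pieces = subwords[word]
        tokens.extend(pieces)
        label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(pieces) - 1))

    assert tokens == ["New", "Yo", "##rk"]
    assert label_ids == [1, 0, -100]
# ---------------------------------------------------------------------------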
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class TokenClassificationDataset(Dataset):
    features: List[InputFeatures]
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task,
        data_dir: str,
        tokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ) -> None:
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = token_classification_task.read_examples_from_file(data_dir, mode)
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["xlnet"]),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                    sep_token=tokenizer.sep_token,
                    sep_token_extra=False,
                    pad_on_left=bool(tokenizer.padding_side == "left"),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f"Saving features into cached file {cached_features_file}")
                torch.save(self.features, cached_features_file)

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class TFTokenClassificationDataset:
    features: List[InputFeatures]
    pad_token_label_id: int = -100

    def __init__(
        self,
        token_classification_task,
        data_dir: str,
        tokenizer,
        labels: List[str],
        model_type: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        mode: Split = Split.train,
    ) -> None:
        examples = token_classification_task.read_examples_from_file(data_dir, mode)
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples,
            labels,
            max_seq_length,
            tokenizer,
            cls_token_at_end=bool(model_type in ["xlnet"]),
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
            sep_token=tokenizer.sep_token,
            sep_token_extra=False,
            pad_on_left=bool(tokenizer.padding_side == "left"),
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

        def gen():
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                (
                    {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                    tf.TensorShape([None]),
                ),
            )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                (
                    {
                        "input_ids": tf.TensorShape([None]),
                        "attention_mask": tf.TensorShape([None]),
                        "token_type_ids": tf.TensorShape([None]),
                    },
                    tf.TensorShape([None]),
                ),
            )

    def get_dataset(self):
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
        return self.dataset

    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]
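
# Hedged sketch (illustrative only; assumes TensorFlow is installed): the
# from_generator pattern used above, in miniature. A generator of
# (features, labels) pairs is wrapped as a tf.data pipeline whose sequence
# length is unknown ahead of time.
def _demo_from_generator():
    def gen():
        yield ({"input_ids": [1, 2, 3], "attention_mask": [1, 1, 1]}, [0, 0, 1])

    return tf.data.Dataset.from_generator(
        gen,
        ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
        ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([None])),
    )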
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
class EditDistance:
    """
    Computes the minimum number of single-character insertions, deletions and
    replacements needed to turn one word into another, with both a memoized
    top-down and an iterative bottom-up dynamic programming strategy.
    """

    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()

    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()

    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}""")
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
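
# Hedged sanity check (added for illustration; not in the original script):
# the classic "kitten" -> "sitting" pair needs exactly 3 edits, so the two
# DP strategies defined above should agree on it.
def _demo_edit_distance() -> None:
    solver = EditDistance()
    assert solver.min_dist_top_down("kitten", "sitting") == 3
    assert solver.min_dist_bottom_up("kitten", "sitting") == 3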
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
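
# Hedged sketch (illustration only; not part of transformers): the lazy-module
# pattern above in miniature. Attribute access triggers the real import on
# demand, so heavy submodules are only paid for when actually used.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)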
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors='pt')
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'], boxes=inputs['boxes'], return_tensors='pt')
        inputs['target_size'] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({'target_size': target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs['bbox'].squeeze(0)]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box) for box in boxes]
            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation['scores'], raw_annotation['labels'], raw_annotation['boxes'])
            ]
        return annotation
    def _get_bounding_box(self, box) -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
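
# Hedged illustration (not part of the pipeline): LayoutLM-style word boxes
# are normalized to a 0-1000 grid, so the `unnormalize` helper above is just
# a rescale back to pixel coordinates.
def _demo_unnormalize(bbox, width, height):
    x0, y0, x1, y1 = bbox
    return [width * x0 / 1000, height * y0 / 1000, width * x1 / 1000, height * y1 / 1000]


# e.g. _demo_unnormalize([100, 250, 500, 500], width=800, height=600)
# -> [80.0, 150.0, 400.0, 300.0]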
'''simple docstring'''
__author__ = 'Tobias Carryer'

from time import time


class LinearCongruentialGenerator:
    """
    A pseudorandom number generator.
    """

    def __init__(self, multiplier, increment, modulo, seed=int(time())):  # noqa: B008
        """
        These parameters are saved and used when next_number() is called.
        modulo is the largest number that can be generated (exclusive).
        """
        self.multiplier = multiplier
        self.increment = increment
        self.modulo = modulo
        self.seed = seed

    def next_number(self):
        """
        The smallest number that can be generated is zero.
        The largest number that can be generated is modulo - 1.
        """
        self.seed = (self.multiplier * self.seed + self.increment) % self.modulo
        return self.seed


if __name__ == "__main__":
    # Show the LCG in action.
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31)
    while True:
        print(lcg.next_number())
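
# Hedged sanity check (not in the original script): with the Numerical
# Recipes constants used above (a=1664525, c=1013904223, m=2**32) and a
# seed of 0, the first two outputs are fixed and well known.
def _demo_lcg() -> None:
    lcg = LinearCongruentialGenerator(1664525, 1013904223, 2 << 31, seed=0)
    assert lcg.next_number() == 1013904223
    assert lcg.next_number() == 1196435762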
"""simple docstring"""
from __future__ import annotations
def _snake_case ( _snake_case : int , _snake_case : int ) -> list[list[int]]:
'''simple docstring'''
_A = []
create_all_state(1 , _snake_case , _snake_case , [] , _snake_case )
return result
def _snake_case ( _snake_case : int , _snake_case : int , _snake_case : int , _snake_case : list[int] , _snake_case : list[list[int]] , ) -> None:
'''simple docstring'''
if level == 0:
total_list.append(current_list[:] )
return
for i in range(_snake_case , total_number - level + 2 ):
current_list.append(_snake_case )
create_all_state(i + 1 , _snake_case , level - 1 , _snake_case , _snake_case )
current_list.pop()
def _snake_case ( _snake_case : list[list[int]] ) -> None:
'''simple docstring'''
for i in total_list:
print(*_snake_case )
if __name__ == "__main__":
a = 4
a = 2
a = generate_all_combinations(n, k)
print_all_state(total_list)
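
# Hedged cross-check (added for illustration): the recursive enumeration
# above visits k-subsets of {1..n} in the same lexicographic order as
# itertools.combinations, so the two should agree exactly.
def _demo_combinations() -> None:
    from itertools import combinations

    assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]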
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for the LRU cache below."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"""Node: key: {self.key}, val: {self.val}, """
            f"""has next: {bool(self.next)}, has prev: {bool(self.prev)}"""
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ['DoubleLinkedList']
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node at the end of the list (before rear)."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node; returns None if it is not linked in."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache storing up to a given capacity of key/value pairs."""

    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
            f"""capacity={self.capacity}, current size={self.num_keys})"""
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, 'cache_info', cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
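
# Hedged usage sketch (not in the original file): basic put/get behaviour of
# the LRUCache above with capacity 2. After touching key 1, key 2 becomes the
# least-recently-used entry and is evicted by the next insertion.
def _demo_lru_cache() -> None:
    cache = LRUCache(2)
    cache.put(1, 1)
    cache.put(2, 2)
    assert cache.get(1) == 1  # key 1 becomes most recently used
    cache.put(3, 3)           # evicts key 2
    assert cache.get(2) is None
    assert cache.get(1) == 1
    assert cache.get(3) == 3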
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """An adapter to assist with logging in multiprocess."""

    @staticmethod
    def _should_log(main_process_only):
        """Check if log should be performed"""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                """You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
        main_process_only = kwargs.pop("""main_process_only""", True)
        in_order = kwargs.pop("""in_order""", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("""ACCELERATE_LOG_LEVEL""", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
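
# Hedged usage sketch (assumes `accelerate` is installed): a typical call
# site for the helper above. Log once from the main process, or once per
# process in rank order.
def _demo_multiprocess_logging() -> None:
    from accelerate import Accelerator

    Accelerator()  # initializes the shared PartialState
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process only", main_process_only=True)
    logger.info("printed once per process, in rank order", main_process_only=False, in_order=True)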
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear", metadata={"help": F"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"}
    )
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["""scripts""", """external_deps""", """test_metrics.py"""]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f'''Found {torch.cuda.device_count()} devices.''')
        cmd = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
                    [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, """clusters"""))
        self.assertTrue(hasattr(image_processor, """do_resize"""))
        self.assertTrue(hasattr(image_processor, """size"""))
        self.assertTrue(hasattr(image_processor, """do_normalize"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 18, """width""": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """image_processor.json""")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("""ImageGPT requires clusters at initialization""")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""", split="""test""")

    image1 = Image.open(dataset[4]["""file"""])
    image2 = Image.open(dataset[5]["""file"""])

    return [image1, image2]


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="""pt""")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="""pt""")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
from collections import defaultdict
def dfs(start: int) -> int:
    """DFS traversal; returns the size of the subtree rooted at `start` and
    records even-sized subtrees as candidate cut points."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict = {}
    cuts: list = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
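
# Hedged note (illustration): for the hard-coded 10-node tree above, the DFS
# records vertices 3 and 6 (plus the root) as even-subtree roots, so the
# script prints len(cuts) - 1 == 2. In other words, removing edges (1, 3)
# and (1, 6) leaves three components of even size.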
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
'''levit-128S''': 128,
'''levit-128''': 128,
'''levit-192''': 192,
'''levit-256''': 256,
'''levit-384''': 384,
}
    names_to_config = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
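
# Hedged note (illustration only): the conversion above maps weights purely by
# position in the two state dicts, which only works because the timm and HF
# implementations enumerate parameters in the same order. In miniature:
def _demo_positional_remap(src_state_dict, dst_keys):
    # toy stand-in: zip destination key names with source tensors, in order
    return {dst: tensor for dst, tensor in zip(dst_keys, src_state_dict.values())}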
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def handle_test_results(test_results):
    expressions = test_results.split(' ')
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n'):
        if re.search(r'_ \[doctest\]', line):
            in_error = True
            file = line.split(' ')[2]
        elif in_error and not line.split(' ')[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',')[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':')
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def failures(self) -> Dict:
"""simple docstring"""
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
    def category_failures(self) -> Dict:
"""simple docstring"""
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ''
for category, failures in category_failures.items():
if len(__magic_name__ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
            report += "`\n`".join(failures)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
    def payload(self) -> str:
"""simple docstring"""
        blocks = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__magic_name__ )
@staticmethod
    def error_out() -> None:
        payload = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
        print('Sending the following payload')
        print(json.dumps({'blocks': payload}))
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text='There was an issue running the tests.', blocks=payload, )
    def post(self) -> None:
        print('Sending the following payload')
        print(json.dumps({'blocks': json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}

        if job_link is not None:
            content['accessory'] = {
                'type': 'button',
                'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
                'url': job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self) -> None:
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.')

        job_link = self.doc_test_results.pop('job_link')
        self.doc_test_results.pop('failures')
        self.doc_test_results.pop('success')
        self.doc_test_results.pop('time_spent')

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result['failures']):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result['failures']

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print('Sending the following reply')
                print(json.dumps({'blocks': blocks}))

                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts['ts'], )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
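# A minimal sketch of the pagination arithmetic used by get_job_links() above,
# assuming the GitHub API's `total_count` field and `per_page=100`: the first page
# is fetched once, then `ceil((total_count - 100) / 100)` extra requests go to
# `&page=2`, `&page=3`, and so on. This helper is illustrative only and is not
# part of the original script; `math` is already used by get_job_links() above.
def _extra_pages(total_count: int, per_page: int = 100) -> int:
    # number of additional pages to fetch after the first one
    return math.ceil(max(total_count - per_page, 0) / per_page)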
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        """simple docstring"""

        def __init__(self, name):
            """simple docstring"""
            self.name = name
            self.paths = []

        def __str__(self):
            """simple docstring"""
            return self.name

        def add_path(self, path):
            """simple docstring"""
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
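# Illustrative example (hypothetical directory name) of the mapping that
# retrieve_available_artifacts() returns when run inside a directory of
# downloaded artifact folders, kept as a comment so it does not run at import:
#
#     artifacts = retrieve_available_artifacts()
#     artifacts["doc_tests_gpu_test_reports"].paths
#     # -> [{"name": "doc_tests_gpu_test_reports", "path": "doc_tests_gpu_test_reports"}]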
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 247 | 1 |
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        """simple docstring"""
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    @slow
    def test_tokenizer_from_pretrained(self):
        """simple docstring"""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)
    def test_tokenizer_from_pretrained_identifier(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        """simple docstring"""
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)
    def test_tokenizer_from_type(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        """simple docstring"""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        """simple docstring"""
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")
@require_tokenizers
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowerCAmelCase = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased" )
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case , _UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_UpperCAmelCase , "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier" , ):
__lowerCAmelCase = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = TOKENIZER_MAPPING.values()
__lowerCAmelCase = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_UpperCAmelCase )
    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        """simple docstring"""
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])
@require_tokenizers
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config" )
self.assertEqual(type(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
self.assertEqual(tokenizer.vocab_size , 3_0_0_0_0 )
self.assertEqual(tokenizer.unk_token , "[UNK]" )
self.assertEqual(tokenizer.padding_side , "right" )
self.assertEqual(tokenizer.truncation_side , "right" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase )
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 1_2 )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = AutoTokenizer.from_pretrained("ctrl" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = get_tokenizer_config("bert-base-cased" )
__lowerCAmelCase = config.pop("_commit_hash" , _UpperCAmelCase )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_UpperCAmelCase , {"do_lower_case": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
__lowerCAmelCase = get_tokenizer_config(_UpperCAmelCase )
self.assertDictEqual(_UpperCAmelCase , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase )
__lowerCAmelCase = get_tokenizer_config(_UpperCAmelCase )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["tokenizer_class"] , "BertTokenizer" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , _UpperCAmelCase )
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
__lowerCAmelCase = CustomTokenizer.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase )
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
try:
AutoConfig.register("custom" , _UpperCAmelCase )
# Can register in two steps
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase ):
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase )
# We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCAmelCase = BertTokenizerFast.from_pretrained(_UpperCAmelCase )
bert_tokenizer.save_pretrained(_UpperCAmelCase )
__lowerCAmelCase = CustomTokenizerFast.from_pretrained(_UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase )
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , use_fast=_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
__lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase )
__lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase )
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase )
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , "NewTokenizer" )
@require_tokenizers
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
class a__ ( lowerCamelCase_ ):
_a : Dict = False
class a__ ( lowerCamelCase_ ):
_a : int = NewTokenizer
_a : Any = False
try:
AutoConfig.register("custom" , _UpperCAmelCase )
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase )
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase )
# If remote code is not set, the default is to use local
__lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
__lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer" , use_fast=_UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertFalse(tokenizer.special_attribute_present )
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
self.assertTrue(tokenizer.special_attribute_present )
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=_UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
# Test we can also load the slow version
__lowerCAmelCase = AutoTokenizer.from_pretrained(
"hf-internal-testing/test_dynamic_tokenizer_legacy" , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , "bert-base is not a local folder and is not a valid model identifier" ):
__lowerCAmelCase = AutoTokenizer.from_pretrained("bert-base" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_UpperCAmelCase , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
__lowerCAmelCase = AutoTokenizer.from_pretrained(_UpperCAmelCase , revision="aaaaaa" )
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
__lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
with RequestCounter() as counter:
__lowerCAmelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
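# A minimal sketch of the register-and-round-trip pattern the tests above exercise,
# kept as a comment so it does not run at import time; CustomConfig/CustomTokenizer
# come from the local test_module package imported at the top of this file:
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
#     tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
#     with tempfile.TemporaryDirectory() as tmp_dir:
#         tokenizer.save_pretrained(tmp_dir)
#         reloaded = AutoTokenizer.from_pretrained(tmp_dir)  # resolves to CustomTokenizer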
| 92 |
'''simple docstring'''
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    '''simple docstring'''
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        """simple docstring"""
        pass
    def check_over_forward(self, time_step=0, **kwargs):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        """simple docstring"""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        """simple docstring"""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        """simple docstring"""
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        """simple docstring"""
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        """simple docstring"""
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        """simple docstring"""
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        """simple docstring"""
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        """simple docstring"""
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        """simple docstring"""
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
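# A minimal end-to-end sketch of the PRK/PLMS stepping pattern exercised by the
# tests above; the random tensor stands in for a real UNet's noise prediction, and
# the tensor shapes are illustrative assumptions rather than values from the tests.
def _pndm_demo_loop():
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    # warm-up Runge-Kutta steps fill the scheduler's history of residuals
    for t in scheduler.prk_timesteps:
        residual = torch.randn(1, 3, 8, 8)  # stand-in for model(sample, t)
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    # the linear multistep phase then reuses the stored residuals
    for t in scheduler.plms_timesteps:
        residual = torch.randn(1, 3, 8, 8)
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    return sample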
| 346 | 0 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
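# A small self-contained check of the two helpers above (hypothetical example,
# not part of the original script): sorts a short list in place and returns the
# number of comparisons performed by the randomized partitioning.
def _demo_quick_sort():
    data = [3, 1, 2, 5, 4]
    comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
    assert data == [1, 2, 3, 4, 5]
    return comparisons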
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 354 |
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        '''simple docstring'''
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFLayoutLMModel,
            TFLayoutLMForMaskedLM,
            TFLayoutLMForTokenClassification,
            TFLayoutLMForSequenceClassification,
            TFLayoutLMForQuestionAnswering,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFLayoutLMModel,
            "fill-mask": TFLayoutLMForMaskedLM,
            "text-classification": TFLayoutLMForSequenceClassification,
            "token-classification": TFLayoutLMForTokenClassification,
            "zero-shot": TFLayoutLMForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10

    def setUp(self):
        '''simple docstring'''
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        '''simple docstring'''
        pass
def prepare_layoutlm_batch_inputs():
    '''simple docstring'''
snake_case_ = tf.convert_to_tensor([[1_0_1,1_0_1_9,1_0_1_4,1_0_1_6,1_0_3_7,1_2_8_4_9,4_7_4_7,1_0_0_4,1_4_2_4_6,2_2_7_8,5_4_3_9,4_5_2_4,5_0_0_2,2_9_3_0,2_1_9_3,2_9_3_0,4_3_4_1,3_2_0_8,1_0_0_5,1_0_5_5,2_1_7_1,2_8_4_8,1_1_3_0_0,3_5_3_1,1_0_2],[1_0_1,4_0_7_0,4_0_3_4,7_0_2_0,1_0_2_4,3_0_5_8,1_0_1_5,1_0_1_3,2_8_6_1,1_0_1_3,6_0_7_0,1_9_2_7_4,2_7_7_2,6_2_0_5,2_7_8_1_4,1_6_1_4_7,1_6_1_4_7,4_3_4_3,2_0_4_7,1_0_2_8_3,1_0_9_6_9,1_4_3_8_9,1_0_1_2,2_3_3_8,1_0_2]] ) # noqa: E231
snake_case_ = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
snake_case_ = tf.convert_to_tensor([[[0,0,0,0],[4_2_3,2_3_7,4_4_0,2_5_1],[4_2_7,2_7_2,4_4_1,2_8_7],[4_1_9,1_1_5,4_3_7,1_2_9],[9_6_1,8_8_5,9_9_2,9_1_2],[2_5_6,3_8,3_3_0,5_8],[2_5_6,3_8,3_3_0,5_8],[3_3_6,4_2,3_5_3,5_7],[3_6_0,3_9,4_0_1,5_6],[3_6_0,3_9,4_0_1,5_6],[4_1_1,3_9,4_7_1,5_9],[4_7_9,4_1,5_2_8,5_9],[5_3_3,3_9,6_3_0,6_0],[6_7,1_1_3,1_3_4,1_3_1],[1_4_1,1_1_5,2_0_9,1_3_2],[6_8,1_4_9,1_3_3,1_6_6],[1_4_1,1_4_9,1_8_7,1_6_4],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[2_9_5,1_4_8,3_4_9,1_6_5],[4_4_1,1_4_9,4_9_2,1_6_6],[4_9_7,1_4_9,5_4_6,1_6_4],[6_4,2_0_1,1_2_5,2_1_8],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]],[[0,0,0,0],[6_6_2,1_5_0,7_5_4,1_6_6],[6_6_5,1_9_9,7_4_2,2_1_1],[5_1_9,2_1_3,5_5_4,2_2_8],[5_1_9,2_1_3,5_5_4,2_2_8],[1_3_4,4_3_3,1_8_7,4_5_4],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[3_1_4,4_6_9,3_7_6,4_8_2],[5_0_4,6_8_4,5_8_2,7_0_6],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[6_1_0,7_4_9,6_5_2,7_6_5],[1_3_0,6_5_9,1_6_8,6_7_2],[1_7_6,6_5_7,2_3_7,6_7_2],[2_3_8,6_5_7,3_1_2,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[7_1_6,3_0_1,8_2_5,3_1_7],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]]] ) # noqa: E231
snake_case_ = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
snake_case_ = tf.convert_to_tensor([[-1_0_0,1_0,1_0,1_0,9,1,-1_0_0,7,7,-1_0_0,7,7,4,2,5,2,8,8,-1_0_0,-1_0_0,5,0,3,2,-1_0_0],[-1_0_0,1_2,1_2,1_2,-1_0_0,1_2,1_0,-1_0_0,-1_0_0,-1_0_0,-1_0_0,1_0,1_2,9,-1_0_0,-1_0_0,-1_0_0,1_0,1_0,1_0,9,1_2,-1_0_0,1_0,-1_0_0]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
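# LayoutLM expects every bounding box as (x0, y0, x1, y1) on a 0-1000 grid, which is
# why the hard-coded boxes above stay within [0, 1000]. A minimal normalization
# sketch for raw pixel coordinates (hypothetical helper, not used by the tests):
def _normalize_bbox(bbox, width, height):
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]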
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
    @slow
    def test_forward_pass_no_head(self):
        '''simple docstring'''
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))

    @slow
    def test_forward_pass_sequence_classification(self):
        '''simple docstring'''
        # initialize model with randomly initialized sequence classification head
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)
        input_ids, attention_mask, bbox, token_type_ids, _ = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_token_classification(self):
        '''simple docstring'''
        # initialize model with randomly initialized token classification head
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)

    @slow
    def test_forward_pass_question_answering(self):
        '''simple docstring'''
        # initialize model with randomly initialized question answering head
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")
        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
| 92 | 0 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """simple docstring"""
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    """simple docstring"""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
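# A minimal usage sketch of the lock API exercised above (hypothetical helper,
# not collected by pytest): `acquire` works both as a context manager and with
# an explicit timeout in seconds.
def _filelock_demo(tmpdir):
    lock = FileLock(str(tmpdir / "demo.lock"))
    with lock.acquire(timeout=1):
        pass  # the lock file is held inside this block and released on exit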
| 306 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """simple docstring"""
    tf_to_pt_map = {}

    # MobileNetVaForImageClassification is defined further down in this module
    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """simple docstring"""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: torch.Tensor, conv_layer: nn.Conv2d) -> torch.Tensor:
    """simple docstring"""
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
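# A quick numeric check of the "SAME" padding rule implemented above, with
# illustrative values (a stride-2 3x3 convolution on a 224x224 input): since 224
# is divisible by the stride, total padding per dimension is max(kernel - stride, 0) = 1,
# split as 0 before and 1 after, giving a 225x225 padded feature map.
def _tf_same_padding_demo():
    conv = nn.Conv2d(3, 32, kernel_size=3, stride=2)
    features = torch.randn(1, 3, 224, 224)
    padded = apply_tf_padding(features, conv)
    return padded.shape  # torch.Size([1, 3, 225, 225])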
class MobileNetVaConvLayer(nn.Module):
    def __init__(
        self,
        config: MobileNetVaConfig,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: Optional[int] = 1,
        groups: Optional[int] = 1,
        bias: bool = False,
        use_normalization: Optional[bool] = True,
        use_activation: Optional[bool or str] = True,
    ) -> None:
        '''simple docstring'''
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            if isinstance(use_activation, str):
                self.activation = ACT2FN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACT2FN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: torch.Tensor) -> torch.Tensor:
        '''simple docstring'''
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d]) -> None:
        '''simple docstring'''
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
UpperCamelCase = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Any , UpperCAmelCase_: MobileNetVaConfig , UpperCAmelCase_: bool = True ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
_SCREAMING_SNAKE_CASE = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
_SCREAMING_SNAKE_CASE = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
_SCREAMING_SNAKE_CASE = nn.ModuleList()
for i in range(13 ):
_SCREAMING_SNAKE_CASE = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
_SCREAMING_SNAKE_CASE = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
_SCREAMING_SNAKE_CASE = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Tuple ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCamelCase ( self: int , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
_SCREAMING_SNAKE_CASE = self.conv_stem(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
_SCREAMING_SNAKE_CASE = layer_module(UpperCAmelCase_ )
if output_hidden_states:
_SCREAMING_SNAKE_CASE = all_hidden_states + (hidden_states,)
_SCREAMING_SNAKE_CASE = hidden_states
if self.pooler is not None:
_SCREAMING_SNAKE_CASE = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
_SCREAMING_SNAKE_CASE = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " ,_UpperCAmelCase ,)
class __UpperCAmelCase (_UpperCAmelCase ):
def __init__( self: Dict , UpperCAmelCase_: MobileNetVaConfig ):
'''simple docstring'''
super().__init__(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = config.num_labels
_SCREAMING_SNAKE_CASE = MobileNetVaModel(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
_SCREAMING_SNAKE_CASE = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCamelCase ( self: Dict , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , UpperCAmelCase_: Optional[torch.Tensor] = None , UpperCAmelCase_: Optional[bool] = None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = return_dict if return_dict is not None else self.config.use_return_dict
_SCREAMING_SNAKE_CASE = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = outputs.pooler_output if return_dict else outputs[1]
_SCREAMING_SNAKE_CASE = self.classifier(self.dropout(UpperCAmelCase_ ) )
_SCREAMING_SNAKE_CASE = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_SCREAMING_SNAKE_CASE = """single_label_classification"""
else:
_SCREAMING_SNAKE_CASE = """multi_label_classification"""
if self.config.problem_type == "regression":
_SCREAMING_SNAKE_CASE = MSELoss()
if self.num_labels == 1:
_SCREAMING_SNAKE_CASE = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
_SCREAMING_SNAKE_CASE = CrossEntropyLoss()
_SCREAMING_SNAKE_CASE = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_SCREAMING_SNAKE_CASE = BCEWithLogitsLoss()
_SCREAMING_SNAKE_CASE = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
_SCREAMING_SNAKE_CASE = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
| 306 | 1 |
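The `apply_tf_padding` helper at the top of this sample reproduces TensorFlow's asymmetric "SAME" padding, which PyTorch's symmetric `padding=` argument cannot express. A minimal standalone sketch of the same rule (the helper name and shapes below are illustrative, not taken from the snippet):

# A minimal sketch of TensorFlow-style "SAME" padding for one spatial
# dimension: total padding is whatever keeps out_size == ceil(in_size / stride).
import torch
import torch.nn.functional as F

def same_pad_amount(in_size: int, kernel: int, stride: int) -> int:
    if in_size % stride == 0:
        return max(kernel - stride, 0)
    return max(kernel - (in_size % stride), 0)

x = torch.randn(1, 3, 7, 7)
pad_h = same_pad_amount(x.shape[2], kernel=3, stride=2)
pad_w = same_pad_amount(x.shape[3], kernel=3, stride=2)
# F.pad takes (left, right, top, bottom); TF places the extra pixel on the
# right/bottom, hence total // 2 on the left/top.
x = F.pad(x, (pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))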
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_lowercase : List[Any] = TypeVar('_T')
class _UpperCAmelCase ( Generic[_T] ):
def __init__( self : int , _lowercase : Iterable[_T] | None = None ):
__UpperCAmelCase = list(iterable or [] )
__UpperCAmelCase = []
def __len__( self : Dict ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Union[str, Any] ):
return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def a ( self : Union[str, Any] , _lowercase : _T ):
self._stacka.append(_lowercase )
def a ( self : Tuple ):
__UpperCAmelCase = self._stacka.pop
__UpperCAmelCase = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('''Queue is empty''' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 86 |
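The class above builds a FIFO queue out of two LIFO stacks: pushes go to one list, and pops drain it into a second list, reversing the order, only when that second list is empty. Each element moves at most twice, so both operations are amortized O(1). A de-obfuscated sketch of the same idea (the names are mine, not the snippet's):

# Two-stack queue: enqueue appends to an inbox; dequeue refills an outbox
# from the inbox (reversing into FIFO order) only when the outbox is empty.
from typing import Generic, TypeVar

T = TypeVar("T")

class TwoStackQueue(Generic[T]):
    def __init__(self) -> None:
        self._inbox: list[T] = []
        self._outbox: list[T] = []

    def enqueue(self, item: T) -> None:
        self._inbox.append(item)

    def dequeue(self) -> T:
        if not self._outbox:
            while self._inbox:
                self._outbox.append(self._inbox.pop())  # reverse into FIFO order
        if not self._outbox:
            raise IndexError("Queue is empty")
        return self._outbox.pop()

q = TwoStackQueue[int]()
for n in (1, 2, 3):
    q.enqueue(n)
assert q.dequeue() == 1  # FIFO order preserved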
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Tuple = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 86 | 1 |
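This `__init__.py` registers names in `_import_structure` and hands them to `_LazyModule`, so importing the package does not pull in torch, TensorFlow, or Flax until one of the listed classes is actually accessed. A simplified sketch of the underlying deferral using PEP 562's module-level `__getattr__` (transformers' real `_LazyModule` is more involved; this only illustrates the idea, and it must live in a package's `__init__.py` for the relative import to resolve):

# Simplified lazy-import sketch, NOT the actual _LazyModule implementation.
import importlib

_import_structure = {
    "configuration_xlm_roberta": ["XLMRobertaConfig"],
    "modeling_xlm_roberta": ["XLMRobertaModel"],
}
_name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}

def __getattr__(name):
    module_name = _name_to_module.get(name)
    if module_name is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    module = importlib.import_module(f".{module_name}", __name__)
    return getattr(module, name)  # the heavy import happens only on first access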
def a__ ( A_, A_, A_, A_ ):
'''simple docstring'''
    __magic_name__ , __magic_name__ = len(A_ ), len(grid[0] )
if (
min(A_, A_ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
__magic_name__ = 0
count += depth_first_search(A_, row + 1, A_, A_ )
count += depth_first_search(A_, row - 1, A_, A_ )
count += depth_first_search(A_, A_, col + 1, A_ )
count += depth_first_search(A_, A_, col - 1, A_ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
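The function above counts simple paths through a grid by marking a cell as visited, recursing in all four directions, and unmarking on the way back (classic backtracking). Since the obfuscated first assignment collapses both grid dimensions into one name, here is a runnable equivalent with the unpacking restored (names are mine):

# Counts simple paths from the top-left to the bottom-right cell,
# treating 1-cells as walls; visited cells are unmarked on backtrack.
def count_paths(grid, row=0, col=0, visit=None):
    visit = set() if visit is None else visit
    rows, cols = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == rows
        or col == cols
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if (row, col) == (rows - 1, cols - 1):
        return 1
    visit.add((row, col))  # mark, explore all four directions, then unmark
    count = sum(
        count_paths(grid, row + dr, col + dc, visit)
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1))
    )
    visit.remove((row, col))
    return count

assert count_paths([[0, 0], [0, 0]]) == 2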
_a = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any ) -> list[str]:
"""simple docstring"""
__lowerCAmelCase: int = set()
# keep track of all the paths to be checked
__lowerCAmelCase: str = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
__lowerCAmelCase: str = queue.pop(0 )
# get the last node from the path
__lowerCAmelCase: Union[str, Any] = path[-1]
if node not in explored:
__lowerCAmelCase: Dict = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
__lowerCAmelCase: Dict = list(SCREAMING_SNAKE_CASE )
new_path.append(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(SCREAMING_SNAKE_CASE )
# in case there's no path between the 2 nodes
return []
def _a ( SCREAMING_SNAKE_CASE : dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
__lowerCAmelCase: Optional[int] = [start]
__lowerCAmelCase: Dict = set(SCREAMING_SNAKE_CASE )
    # Keep tabs on distances from the `start` node.
__lowerCAmelCase: Optional[int] = {start: 0, target: -1}
while queue:
__lowerCAmelCase: Any = queue.pop(0 )
if node == target:
__lowerCAmelCase: Optional[int] = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(SCREAMING_SNAKE_CASE )
queue.append(SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Union[str, Any] = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 322 | 0 |
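Both searches above pop from the front of a plain list, which costs O(n) per dequeue. A sketch of the same shortest-path BFS using `collections.deque`, which makes each dequeue O(1) (the demo graph and names here are illustrative):

# BFS shortest path with an O(1) dequeue via collections.deque.
from collections import deque

def bfs_shortest_path(graph: dict, start: str, goal: str) -> list[str]:
    if start == goal:
        return [start]
    explored = {start}
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        for neighbour in graph[path[-1]]:
            if neighbour == goal:
                return path + [neighbour]
            if neighbour not in explored:
                explored.add(neighbour)
                queue.append(path + [neighbour])
    return []

demo = {"A": ["B", "C"], "B": ["A", "D"], "C": ["A"], "D": ["B"]}
assert bfs_shortest_path(demo, "A", "D") == ["A", "B", "D"]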
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__UpperCAmelCase : List[str] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
__UpperCAmelCase : Union[str, Any] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
__UpperCAmelCase : Any = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/google-research/tree/master/rouge'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/ROUGE_(metric)''',
'''https://github.com/google-research/google-research/tree/master/rouge''',
] , )
def _lowercase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
if rouge_types is None:
UpperCamelCase : int = ['''rouge1''', '''rouge2''', '''rougeL''', '''rougeLsum''']
UpperCamelCase : Dict = rouge_scorer.RougeScorer(rouge_types=__SCREAMING_SNAKE_CASE , use_stemmer=__SCREAMING_SNAKE_CASE )
if use_aggregator:
UpperCamelCase : List[str] = scoring.BootstrapAggregator()
else:
UpperCamelCase : Optional[int] = []
for ref, pred in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = scorer.score(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if use_aggregator:
aggregator.add_scores(__SCREAMING_SNAKE_CASE )
else:
scores.append(__SCREAMING_SNAKE_CASE )
if use_aggregator:
UpperCamelCase : int = aggregator.aggregate()
else:
UpperCamelCase : List[Any] = {}
for key in scores[0]:
UpperCamelCase : Optional[Any] = [score[key] for score in scores]
return result
| 315 |
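The metric class above is a thin wrapper over Google's `rouge_score` package; calling that package directly looks like the following (assumes `pip install rouge-score`; the printed value holds because the strings are identical):

# Direct use of the rouge_score package that the metric above wraps.
from rouge_score import rouge_scorer, scoring

scorer = rouge_scorer.RougeScorer(["rouge1", "rougeL"], use_stemmer=True)
aggregator = scoring.BootstrapAggregator()
for ref, pred in [("hello there", "hello there"), ("general kenobi", "general kenobi")]:
    aggregator.add_scores(scorer.score(ref, pred))
result = aggregator.aggregate()
print(result["rouge1"].mid.fmeasure)  # 1.0 for identical strings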
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a ( ):
"""simple docstring"""
UpperCamelCase : Any = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
UpperCamelCase : Tuple = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(SCREAMING_SNAKE_CASE_ )
# Let's go
UpperCamelCase : List[Any] = parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE_ , '''func''' ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase : str = args.func(SCREAMING_SNAKE_CASE_ )
service.run()
if __name__ == "__main__":
main()
| 315 | 1 |
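The CLI above relies on each command class registering its own subparser and binding a factory through `set_defaults(func=...)`, so that `args.func(args)` returns an object with a `run()` method. A self-contained sketch of that pattern (`EchoCommand` is invented for illustration and is not part of the diffusers CLI):

# Minimal subcommand pattern: register a subparser, bind a factory to func.
from argparse import ArgumentParser

class EchoCommand:
    @staticmethod
    def register_subcommand(subparsers):
        parser = subparsers.add_parser("echo", help="print a message")
        parser.add_argument("message")
        parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message):
        self.message = message

    def run(self):
        print(self.message)

parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="demo-cli command helpers")
EchoCommand.register_subcommand(subparsers)
args = parser.parse_args(["echo", "hi"])
args.func(args).run()  # prints: hi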
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : List[str] , lowerCamelCase_ : int , lowerCamelCase_ : Any=13 , lowerCamelCase_ : Optional[Any]=30 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : Tuple=3 , lowerCamelCase_ : int=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : Tuple=32 , lowerCamelCase_ : Any=5 , lowerCamelCase_ : Union[str, Any]=4 , lowerCamelCase_ : Optional[Any]=37 , lowerCamelCase_ : Optional[int]="gelu" , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Optional[int]=10 , lowerCamelCase_ : Union[str, Any]=0.02 , lowerCamelCase_ : Any=3 , lowerCamelCase_ : Dict=0.6 , lowerCamelCase_ : Any=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : str = batch_size
SCREAMING_SNAKE_CASE : List[Any] = image_size
SCREAMING_SNAKE_CASE : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE : Optional[int] = num_channels
SCREAMING_SNAKE_CASE : List[Any] = is_training
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Any = hidden_size
SCREAMING_SNAKE_CASE : str = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : Tuple = intermediate_size
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : List[Any] = type_sequence_label_size
SCREAMING_SNAKE_CASE : List[Any] = initializer_range
SCREAMING_SNAKE_CASE : Union[str, Any] = mask_ratio
SCREAMING_SNAKE_CASE : int = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE : Optional[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : List[Any] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCamelCase_ ( self : int , lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = ViTMAEModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self : Dict , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = ViTMAEForPreTraining(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : str = (self.image_size // self.patch_size) ** 2
SCREAMING_SNAKE_CASE : Tuple = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
SCREAMING_SNAKE_CASE : Union[str, Any] = 1
SCREAMING_SNAKE_CASE : Tuple = ViTMAEForPreTraining(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ViTMAEModelTester(self )
SCREAMING_SNAKE_CASE : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ , hidden_size=37 )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase_ , nn.Linear ) )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Any = model_class(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE : Optional[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
def lowerCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase_ )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase_ )
def lowerCamelCase_ ( self : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : int ):
'''simple docstring'''
np.random.seed(2 )
SCREAMING_SNAKE_CASE : List[str] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCamelCase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
SCREAMING_SNAKE_CASE : int = pt_noise
super().check_pt_tf_models(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE : Tuple = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs[0].cpu().numpy()
SCREAMING_SNAKE_CASE : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = model_class.from_pretrained(lowerCamelCase_ )
model.to(lowerCamelCase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
SCREAMING_SNAKE_CASE : Any = model(**self._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ ) )
# Make sure we don't have nans
SCREAMING_SNAKE_CASE : Optional[int] = after_outputs[0].cpu().numpy()
SCREAMING_SNAKE_CASE : str = 0
SCREAMING_SNAKE_CASE : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase_ , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def __A ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
np.random.seed(2 )
SCREAMING_SNAKE_CASE : Tuple = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Any = self.default_image_processor
SCREAMING_SNAKE_CASE : Optional[Any] = prepare_img()
SCREAMING_SNAKE_CASE : List[str] = image_processor(images=lowerCamelCase_ , return_tensors="""pt""" ).to(lowerCamelCase_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
SCREAMING_SNAKE_CASE : Union[str, Any] = ViTMAEConfig()
SCREAMING_SNAKE_CASE : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
SCREAMING_SNAKE_CASE : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE : Optional[Any] = model(**lowerCamelCase_ , noise=torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ ) )
# verify the logits
SCREAMING_SNAKE_CASE : Dict = torch.Size((1, 1_96, 7_68) )
self.assertEqual(outputs.logits.shape , lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowerCamelCase_ ) , atol=1e-4 ) )
| 323 |
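The tester above derives the visible sequence length from the mask ratio: ViTMAE keeps roughly `(1 - mask_ratio)` of the `num_patches + 1` tokens (the extra token is `[CLS]`), rounded up. A worked check using the tester's own defaults (`image_size=30`, `patch_size=2`, `mask_ratio=0.6`):

# The expected-sequence-length rule used by the tester above, as a worked check.
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                 # 15**2 = 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
assert (num_patches, seq_length) == (225, 91)                 # 0.4 * 226 = 90.4 -> 91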
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = 3_84
SCREAMING_SNAKE_CASE : Union[str, Any] = 7
if "tiny" in model_name:
SCREAMING_SNAKE_CASE : List[str] = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 6, 2)
SCREAMING_SNAKE_CASE : List[Any] = (3, 6, 12, 24)
elif "small" in model_name:
SCREAMING_SNAKE_CASE : Any = 96
SCREAMING_SNAKE_CASE : List[str] = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (3, 6, 12, 24)
elif "base" in model_name:
SCREAMING_SNAKE_CASE : int = 1_28
SCREAMING_SNAKE_CASE : Any = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : int = (4, 8, 16, 32)
SCREAMING_SNAKE_CASE : Optional[Any] = 12
SCREAMING_SNAKE_CASE : str = 5_12
elif "large" in model_name:
SCREAMING_SNAKE_CASE : Tuple = 1_92
SCREAMING_SNAKE_CASE : Tuple = (2, 2, 18, 2)
SCREAMING_SNAKE_CASE : List[str] = (6, 12, 24, 48)
SCREAMING_SNAKE_CASE : Tuple = 12
SCREAMING_SNAKE_CASE : Union[str, Any] = 7_68
# set label information
SCREAMING_SNAKE_CASE : List[str] = 1_50
SCREAMING_SNAKE_CASE : Optional[Any] = """huggingface/label-files"""
SCREAMING_SNAKE_CASE : List[str] = """ade20k-id2label.json"""
SCREAMING_SNAKE_CASE : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase_ , lowerCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE : str = {int(lowerCamelCase_ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : int = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = SwinConfig(
embed_dim=lowerCamelCase_ , depths=lowerCamelCase_ , num_heads=lowerCamelCase_ , window_size=lowerCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
SCREAMING_SNAKE_CASE : List[str] = UperNetConfig(
backbone_config=lowerCamelCase_ , auxiliary_in_channels=lowerCamelCase_ , num_labels=lowerCamelCase_ , idalabel=lowerCamelCase_ , labelaid=lowerCamelCase_ , )
return config
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = dct.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[Any] = val
def __A ( lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
SCREAMING_SNAKE_CASE : Dict = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight''' )
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE : int = in_proj_weight[:dim, :]
SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[: dim]
SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[
dim : dim * 2, :
]
SCREAMING_SNAKE_CASE : Any = in_proj_bias[
dim : dim * 2
]
SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[
-dim :, :
]
SCREAMING_SNAKE_CASE : str = in_proj_bias[-dim :]
# fmt: on
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Tuple = x.shape
SCREAMING_SNAKE_CASE : Any = x.reshape(lowerCamelCase_ , 4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : Any = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE, SCREAMING_SNAKE_CASE : Dict = x.shape
SCREAMING_SNAKE_CASE : Dict = x.reshape(lowerCamelCase_ , in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(lowerCamelCase_ , lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = x.shape[0]
SCREAMING_SNAKE_CASE : List[str] = x.reshape(4 , in_channel // 4 )
SCREAMING_SNAKE_CASE : str = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = x.shape[0]
SCREAMING_SNAKE_CASE : Optional[int] = x.reshape(in_channel // 4 , 4 )
SCREAMING_SNAKE_CASE : str = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(lowerCamelCase_ )
return x
def __A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = {
"""upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
"""upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
"""upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
"""upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
}
SCREAMING_SNAKE_CASE : List[str] = model_name_to_url[model_name]
SCREAMING_SNAKE_CASE : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase_ , map_location="""cpu""" , file_name=lowerCamelCase_ )[
"""state_dict"""
]
for name, param in state_dict.items():
print(lowerCamelCase_ , param.shape )
SCREAMING_SNAKE_CASE : Dict = get_upernet_config(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = UperNetForSemanticSegmentation(lowerCamelCase_ )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE : Union[str, Any] = state_dict.pop(lowerCamelCase_ )
if "bn" in key:
SCREAMING_SNAKE_CASE : List[str] = key.replace("""bn""" , """batch_norm""" )
SCREAMING_SNAKE_CASE : Optional[Any] = val
# rename keys
SCREAMING_SNAKE_CASE : Union[str, Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
read_in_q_k_v(lowerCamelCase_ , config.backbone_config )
# fix downsample parameters
for key, value in state_dict.items():
if "downsample" in key:
if "reduction" in key:
SCREAMING_SNAKE_CASE : Tuple = reverse_correct_unfold_reduction_order(lowerCamelCase_ )
if "norm" in key:
SCREAMING_SNAKE_CASE : Optional[int] = reverse_correct_unfold_norm_order(lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
# verify on image
SCREAMING_SNAKE_CASE : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
SCREAMING_SNAKE_CASE : Tuple = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" )
SCREAMING_SNAKE_CASE : Optional[int] = SegformerImageProcessor()
SCREAMING_SNAKE_CASE : str = processor(lowerCamelCase_ , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[str] = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : Dict = outputs.logits
print(logits.shape )
print("""First values of logits:""" , logits[0, 0, :3, :3] )
# assert values
if model_name == "upernet-swin-tiny":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] )
elif model_name == "upernet-swin-small":
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(
[[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] )
elif model_name == "upernet-swin-base":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] )
elif model_name == "upernet-swin-large":
SCREAMING_SNAKE_CASE : str = torch.tensor(
[[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase_ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowerCamelCase_ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowerCamelCase_ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-swin-tiny""",
type=str,
choices=[f'''upernet-swin-{size}''' for size in ["""tiny""", """small""", """base""", """large"""]],
help="""Name of the Swin + UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__UpperCAmelCase = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 323 | 1 |
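`read_in_q_k_v` above slices a fused `(3*dim, dim)` input projection into separate query/key/value weights, row block by row block. A standalone sketch showing that the slicing preserves the projection (the shapes here are illustrative):

# Splitting a fused QKV projection into per-head weights; concatenating the
# three sliced projections reproduces the fused output exactly.
import torch

dim = 4
in_proj_weight = torch.randn(3 * dim, dim)
in_proj_bias = torch.randn(3 * dim)

q_w, k_w, v_w = in_proj_weight[:dim], in_proj_weight[dim : 2 * dim], in_proj_weight[-dim:]
q_b, k_b, v_b = in_proj_bias[:dim], in_proj_bias[dim : 2 * dim], in_proj_bias[-dim:]

x = torch.randn(1, dim)
fused = x @ in_proj_weight.T + in_proj_bias
split = torch.cat([x @ q_w.T + q_b, x @ k_w.T + k_b, x @ v_w.T + v_b], dim=-1)
assert torch.allclose(fused, split)  # slicing preserves the projection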
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
lowerCamelCase :Tuple = numpy.array([0, 0])
lowerCamelCase :Optional[Any] = numpy.array([0.5, 0.8_660_254])
lowerCamelCase :Union[str, Any] = numpy.array([1, 0])
lowerCamelCase :Tuple = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[Any] = initial_vectors
for _ in range(lowerCamelCase__ ):
A_ : Any = iteration_step(lowerCamelCase__ )
return vectors
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = []
for i, start_vector in enumerate(vectors[:-1] ):
A_ : str = vectors[i + 1]
new_vectors.append(lowerCamelCase__ )
A_ : str = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = numpy.radians(lowerCamelCase__ )
A_, A_ : str = numpy.cos(lowerCamelCase__ ), numpy.sin(lowerCamelCase__ )
A_ : Optional[int] = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCamelCase__ , lowerCamelCase__ )
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[Any] = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
A_, A_ : Optional[int] = zip(*lowerCamelCase__ )
plt.plot(lowerCamelCase__ , lowerCamelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase :Optional[Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 135 |
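`rotate` applies the standard 2D rotation matrix ((cos θ, −sin θ), (sin θ, cos θ)) to a vector, which is what bends each edge segment of the snowflake by 60 degrees. A quick worked check of that helper in isolation:

# Rotating the unit x-vector by 60 degrees should land on (cos 60, sin 60).
import numpy

def rotate(vector, angle_deg):
    theta = numpy.radians(angle_deg)
    c, s = numpy.cos(theta), numpy.sin(theta)
    return numpy.array(((c, -s), (s, c))) @ vector

assert numpy.allclose(rotate(numpy.array([1.0, 0.0]), 60), [0.5, numpy.sqrt(3) / 2])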
'''simple docstring'''
def a ( ):
'''simple docstring'''
A_ : Optional[int] = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
A_ : Dict = 6
A_ : List[Any] = 1
A_ : Optional[Any] = 19_01
A_ : Tuple = 0
while year < 20_01:
day += 7
if (year % 4 == 0 and year % 1_00 != 0) or (year % 4_00 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
A_ : Optional[Any] = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
A_ : str = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
A_ : List[str] = day - days_per_month[month - 2]
if month > 12:
year += 1
A_ : Tuple = 1
if year < 20_01 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 135 | 1 |
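This is Project Euler problem 19: counting the Sundays that fell on the first of a month during the twentieth century. A `datetime`-based cross-check of the hand-rolled calendar above (the accepted answer is 171):

# Cross-check of the manual calendar arithmetic using the datetime module.
from datetime import date

sundays = sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if date(year, month, 1).weekday() == 6  # Monday == 0, so Sunday == 6
)
assert sundays == 171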
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class UpperCAmelCase_ ( _a ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> Dict:
super().__init__(
features=__lowerCamelCase , cache_dir=__lowerCamelCase , keep_in_memory=__lowerCamelCase , streaming=__lowerCamelCase , num_proc=__lowerCamelCase , **__lowerCamelCase , )
__lowercase : Union[str, Any] = Generator(
cache_dir=__lowerCamelCase , features=__lowerCamelCase , generator=__lowerCamelCase , gen_kwargs=__lowerCamelCase , **__lowerCamelCase , )
def _lowerCamelCase ( self ) -> Union[str, Any]:
# Build iterable dataset
if self.streaming:
__lowercase : Any = self.builder.as_streaming_dataset(split='''train''' )
# Build regular (map-style) dataset
else:
__lowercase : Tuple = None
__lowercase : Dict = None
__lowercase : Dict = None
__lowercase : List[str] = None
self.builder.download_and_prepare(
download_config=__lowerCamelCase , download_mode=__lowerCamelCase , verification_mode=__lowerCamelCase , base_path=__lowerCamelCase , num_proc=self.num_proc , )
__lowercase : Tuple = self.builder.as_dataset(
split='''train''' , verification_mode=__lowerCamelCase , in_memory=self.keep_in_memory )
return dataset
| 249 |
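This input stream is what backs `Dataset.from_generator`, which materializes a map-style dataset from a plain Python generator (or streams it when `streaming=True`). A typical call, assuming a recent version of the `datasets` library is installed:

# Building a dataset from a generator; the generator is consumed lazily.
from datasets import Dataset

def gen():
    for i in range(3):
        yield {"id": i, "text": f"example {i}"}

ds = Dataset.from_generator(gen)
print(ds[0])  # {'id': 0, 'text': 'example 0'}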
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case__ : Tuple = ShapEImgaImgPipeline
snake_case__ : Optional[Any] = ["""image"""]
snake_case__ : Union[str, Any] = ["""image"""]
snake_case__ : Optional[Any] = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
snake_case__ : List[str] = False
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Any ):
return 32
@property
def _A ( self : Optional[Any] ):
return self.time_input_dim * 4
@property
def _A ( self : Union[str, Any] ):
return 8
@property
def _A ( self : int ):
torch.manual_seed(0 )
UpperCamelCase :Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
UpperCamelCase :Optional[int] = CLIPVisionModel(__lowerCamelCase )
return model
@property
def _A ( self : str ):
UpperCamelCase :Optional[int] = CLIPImageProcessor(
crop_size=224 , do_center_crop=__lowerCamelCase , do_normalize=__lowerCamelCase , do_resize=__lowerCamelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
@property
def _A ( self : Tuple ):
torch.manual_seed(0 )
UpperCamelCase :Dict = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""embedding_proj_norm_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
UpperCamelCase :int = PriorTransformer(**__lowerCamelCase )
return model
@property
def _A ( self : Optional[int] ):
torch.manual_seed(0 )
UpperCamelCase :str = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
UpperCamelCase :List[str] = ShapERenderer(**__lowerCamelCase )
return model
def _A ( self : str ):
UpperCamelCase :int = self.dummy_prior
UpperCamelCase :Any = self.dummy_image_encoder
UpperCamelCase :Dict = self.dummy_image_processor
UpperCamelCase :List[Any] = self.dummy_renderer
UpperCamelCase :int = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1_024 , prediction_type="""sample""" , use_karras_sigmas=__lowerCamelCase , clip_sample=__lowerCamelCase , clip_sample_range=1.0 , )
UpperCamelCase :Optional[Any] = {
"""prior""": prior,
"""image_encoder""": image_encoder,
"""image_processor""": image_processor,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _A ( self : int , __lowerCamelCase : int , __lowerCamelCase : Any=0 ):
UpperCamelCase :Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCamelCase :List[Any] = torch.manual_seed(__lowerCamelCase )
else:
UpperCamelCase :Optional[int] = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCamelCase :Optional[Any] = {
"""image""": input_image,
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _A ( self : List[str] ):
UpperCamelCase :Dict = """cpu"""
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :int = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Optional[Any] = pipe(**self.get_dummy_inputs(__lowerCamelCase ) )
UpperCamelCase :Dict = output.images[0]
UpperCamelCase :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCamelCase :Dict = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _A ( self : List[Any] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _A ( self : List[Any] ):
UpperCamelCase :str = torch_device == """cpu"""
UpperCamelCase :int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__lowerCamelCase , relax_max_difference=__lowerCamelCase , )
def _A ( self : List[Any] ):
UpperCamelCase :List[Any] = self.get_dummy_components()
UpperCamelCase :Optional[int] = self.pipeline_class(**__lowerCamelCase )
UpperCamelCase :List[Any] = pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCamelCase :Any = 1
UpperCamelCase :int = 2
UpperCamelCase :Union[str, Any] = self.get_dummy_inputs(__lowerCamelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCamelCase :str = batch_size * [inputs[key]]
UpperCamelCase :Optional[int] = pipe(**__lowerCamelCase , num_images_per_prompt=__lowerCamelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e_img2img( self : Any ):
        input_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/shap_e/corgi.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/shap_e/test_shap_e_img2img_out.npy""" )
        pipe = ShapEImg2ImgPipeline.from_pretrained("""openai/shap-e-img2img""" )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_image )
| 38 | 0 |
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    if "emb" in name:
        name = name.replace('emb' , 'model.decoder.embed_tokens' )
    if "transformer" in name:
        name = name.replace('transformer' , 'model.decoder' )
    if "cross_attention" in name:
        name = name.replace('cross_attention' , 'encoder_attn' )
    if "linear1" in name:
        name = name.replace('linear1' , 'fc1' )
    if "linear2" in name:
        name = name.replace('linear2' , 'fc2' )
    if "norm1" in name:
        name = name.replace('norm1' , 'self_attn_layer_norm' )
    if "norm_cross" in name:
        name = name.replace('norm_cross' , 'encoder_attn_layer_norm' )
    if "norm2" in name:
        name = name.replace('norm2' , 'final_layer_norm' )
    if "out_norm" in name:
        name = name.replace('out_norm' , 'model.decoder.layer_norm' )
    if "linears" in name:
        name = name.replace('linears' , 'lm_heads' )
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace('condition_provider.conditioners.description.output_proj' , 'enc_to_dec_proj' )
    return name
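# Illustrative mapping (a sketch, not part of the original script; the key below is hypothetical):
#   rename_keys("transformer.layers.0.linear1.weight") -> "model.decoder.layers.0.fc1.weight"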
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    keys = list(state_dict.keys() )
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key )
        key = rename_keys(key )
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace('in_proj_weight' , 'q_proj.weight' )] = val[:hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'k_proj.weight' )] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace('in_proj_weight' , 'v_proj.weight' )] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len('enc_to_dec_proj.' ) :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
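# Note on the qkv split above: audiocraft stores each attention projection as one fused
# in_proj matrix of shape (3 * hidden_size, hidden_size); rows [:h], [h:2h] and [-h:] are
# the separate q/k/v weights expected by the Hugging Face implementation.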
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1_024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1_536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2_048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F'''Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.''' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
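# Quick sanity check of the size table above (a sketch, not executed by the script):
#   cfg = decoder_config_from_checkpoint("small")
#   assert (cfg.hidden_size, cfg.num_hidden_layers, cfg.num_attention_heads) == (1_024, 24, 16)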
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint , pytorch_dump_folder=None , repo_id=None , device="cpu" ):
    fairseq_model = MusicGen.get_pretrained(checkpoint , device=device )
    decoder_config = decoder_config_from_checkpoint(checkpoint )
    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict , hidden_size=decoder_config.hidden_size )
    text_encoder = T5EncoderModel.from_pretrained('t5-base' )
    audio_encoder = EncodecModel.from_pretrained('facebook/encodec_32khz' )
    decoder = MusicgenForCausalLM(decoder_config ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict , strict=False )
    for key in missing_keys.copy():
        if key.startswith(('text_encoder', 'audio_encoder') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key )
    if len(missing_keys ) > 0:
        raise ValueError(F'''Missing key(s) in state_dict: {missing_keys}''' )
    if len(unexpected_keys ) > 0:
        raise ValueError(F'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder , audio_encoder=audio_encoder , decoder=decoder )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict )
    # check we can do a forward pass
    input_ids = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    decoder_input_ids = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        logits = model(input_ids=input_ids , decoder_input_ids=decoder_input_ids ).logits
    if logits.shape != (8, 1, 2_048):
        raise ValueError('Incorrect shape for logits' )
    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained('t5-base' )
    feature_extractor = AutoFeatureExtractor.from_pretrained('facebook/encodec_32khz' , padding_side='left' )
    processor = MusicgenProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2_048
    model.generation_config.pad_token_id = 2_048
    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate )
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0
    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder ).mkdir(exist_ok=True )
        logger.info(F'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
        model.save_pretrained(pytorch_dump_folder )
        processor.save_pretrained(pytorch_dump_folder )
    if repo_id:
        logger.info(F'''Pushing model {checkpoint} to {repo_id}''' )
        model.push_to_hub(repo_id )
        processor.push_to_hub(repo_id )
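# Example invocation (a sketch; the script filename below is an assumption):
#   python convert_musicgen_transformers.py --checkpoint small --pytorch_dump_folder ./musicgen-small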
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
| 369 |
from ..utils import DummyObject, requires_backends
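# The classes below all follow the standard "dummy object" pattern: each placeholder class
# raises a clear error via requires_backends as soon as it is instantiated or loaded,
# telling the user which optional backends ("torch", "transformers", "onnx") are missing.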
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 307 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
A__ = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ = ["""BeitFeatureExtractor"""]
A__ = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_beit"""] = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_beit"""] = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 82 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 | 0 |
'''simple docstring'''
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
_UpperCAmelCase : str = """sshleifer/mar_enro_6_3_student"""
class a__ ( TestCasePlus ):
"""simple docstring"""
    def setUp(self ):
super().setUp()
        data_cached = cached_path(
            '''https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz''' , extract_compressed_file=True , )
        self.data_dir = F"""{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"""
@slow
@require_torch_gpu
    def test_model_download(self ):
        MarianMTModel.from_pretrained(MARIAN_MODEL )
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self ):
        env_vars_to_replace = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
        bash_script = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
        bash_script = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__lowerCAmelCase = F"""
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
""".split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['''finetune.py'''] + bash_script.split() + args
        with patch.object(sys , '''argv''' , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationModule.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            model = main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , float )
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.0_1 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith('''.ckpt''' )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location='''cpu''' )
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class a__ ( TestCasePlus ):
"""simple docstring"""
@timeout_decorator.timeout(6_00 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self ):
__lowerCAmelCase = F"""{self.test_file_dir_str}/test_data/wmt_en_ro"""
__lowerCAmelCase = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 1_28,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
        bash_script = (
            (self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
        )
        bash_script = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
        bash_script = bash_script.replace('''--fp16 ''' , ''' ''' )
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k , str(v ) )
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('''--fp16''' , '''''' )
        epochs = 6
        testargs = (
['''distillation.py''']
+ bash_script.split()
+ [
F"""--output_dir={output_dir}""",
'''--gpus=1''',
'''--learning_rate=1e-3''',
F"""--num_train_epochs={epochs}""",
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
        with patch.object(sys , '''argv''' , testargs ):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser )
            parser = SummarizationDistiller.add_model_specific_args(parser , os.getcwd() )
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args )
# Check metrics
        metrics = load_json(model.metrics_save_path )
        first_step_stats = metrics['''val'''][0]
        last_step_stats = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.0_1
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[F"""val_avg_{model.val_metric}"""] , float )
# check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir )
        ckpt_path = [x for x in contents if x.endswith('''.ckpt''' )][0]
        full_path = os.path.join(args.output_dir , ckpt_path )
        ckpt = torch.load(full_path , map_location='''cpu''' )
        expected_key = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
            contents = {os.path.basename(p ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 370 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
_UpperCAmelCase : Dict = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    dataset = load_dataset('''glue''', '''mrpc''', split='''validation''')
    def tokenize_function(examples):
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
    tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')
    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), F"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load('''glue''', '''mrpc''')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['''no''']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['''labels'''])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['''labels''']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
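# This script is normally launched through the accelerate multi-process test harness, e.g.
# (hypothetical invocation): `accelerate launch --num_processes 2 test_metrics.py`.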
if __name__ == "__main__":
main()
| 9 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 220 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess( image ):
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    image = [trans(img.convert('''RGB''' ) ) for img in image]
    image = torch.stack(image )
    return image
class _UpperCAmelCase ( DiffusionPipeline ):
    def __init__( self , unet , scheduler ):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=unet , scheduler=scheduler )
    def check_inputs( self , strength ):
        if strength < 0 or strength > 1:
            raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''' )
    def get_timesteps( self , num_inference_steps , strength , device ):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength ) , num_inference_steps )
        t_start = max(num_inference_steps - init_timestep , 0 )
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    def prepare_latents( self , image , timestep , batch_size , dtype , device , generator=None ):
        if not isinstance(image , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image )}''' )
        init_latents = image.to(device=device , dtype=dtype )
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator )}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
        shape = init_latents.shape
        noise = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        # get latents
        print('''add noise to latents at timestep''' , timestep )
        init_latents = self.scheduler.add_noise(init_latents , noise , timestep )
        latents = init_latents
        return latents
@torch.no_grad()
    def __call__( self , image: Union[torch.FloatTensor, PIL.Image.Image] = None , strength: float = 0.8 , batch_size: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , eta: float = 0.0 , num_inference_steps: int = 50 , use_clipped_model_output: Optional[bool] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ):
        self.check_inputs(strength )
        # 2. Preprocess image
        image = preprocess(image )
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps , strength , self.device )
        latent_timestep = timesteps[:1].repeat(batch_size )
        # 4. Prepare latent variables
        latents = self.prepare_latents(image , latent_timestep , batch_size , self.unet.dtype , self.device , generator )
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(image , t ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output , t , image , eta=eta , use_clipped_model_output=use_clipped_model_output , generator=generator , ).prev_sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image )
| 332 | 0 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace("model." , "" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1" , "attention.output.LayerNorm" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2" , "output.LayerNorm" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm" , "LayerNorm" )
    if "transformer" in orig_key:
        layer_num = orig_key.split("." )[0].split("_" )[-1]
        orig_key = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn" , "attention.self" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha" , "attention" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q" , "self.query" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k" , "self.key" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v" , "self.value" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1" , "intermediate.dense" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2" , "output.dense" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff" , "output.dense" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm" , "cls.predictions.transform" )
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val
    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2
    return orig_state_dict
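# The `+ 2` offset above appears to mirror the RoBERTa-style convention of starting position
# ids past the padding index (an inference from the code, not stated in the source).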
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )
    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )
    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
if __name__ == "__main__":
A : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
A : int = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 305 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
A : List[str] = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 305 | 1 |
def matching_min_vertex_cover( graph: dict ) -> set:
    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph )
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add its endpoints to chosen_vertices, and then
    # remove all edges incident to from_node or to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges( graph: dict ) -> set:
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
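# Worked example (one possible run; edges.pop() on a set is arbitrary): for
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}, popping (0, 1)
# keeps {0, 1} and discards every edge touching 0 or 1; popping (2, 3) next yields the
# cover {0, 1, 2, 3}.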
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 12 |
'''simple docstring'''
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args ,take_from=None ,standard_warn=True ,stacklevel=2 ) -> Any:
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] ,tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                F'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                F' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs ,dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = F'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs ,attribute ):
            values += (getattr(deprecated_kwargs ,attribute ),)
            warning = F'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = F'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message ,FutureWarning ,stacklevel=stacklevel )
    if isinstance(deprecated_kwargs ,dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(F'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
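# Typical call pattern (a sketch, assuming this is diffusers' `deprecate` utility):
#   scale = deprecate("scale", "1.0.0", "Use `guidance_scale` instead.", take_from=kwargs)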
| 208 | 0 |
from __future__ import annotations
from random import random
class SCREAMING_SNAKE_CASE_ :
    def __init__( self , value: int | None = None ):
        self.value = value
        self.prior = random()
        self.left = None
        self.right = None
def __repr__( self : Tuple ):
"""simple docstring"""
from pprint import pformat
if self.left is None and self.right is None:
return f"""\'{self.value}: {self.prior:.5}\'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self : Optional[Any] ):
"""simple docstring"""
        value = str(self.value ) + """ """
        left = str(self.left or """""" )
        right = str(self.right or """""" )
return value + left + right
def split( root: Node | None , value: int ) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right
def merge( left: Node | None , right: Node | None ) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right
def insert( root: Node | None , value: int ) -> Node | None:
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )
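# insert is "split at value, then merge left + new node + right": inserting 5 into a treap
# holding {3, 7} splits it into ({3}, {7}) and merges both halves back around Node(5).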
def erase( root: Node | None , value: int ) -> Node | None:
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )
def inorder( root: Node | None ) -> None:
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=""",""" )
inorder(root.right )
def interact_treap( root: Node | None , args: str ) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("""Unknown command""" )
    return root
def main() -> None:
    root = None
    print(
        """enter numbers to create a tree, + value to add value into treap, """
        """- value to erase all nodes with value. 'q' to quit. """ )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("""good bye!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 366 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
print(F'''{len(upper_files)} files contain uppercase characters:''')
print("""\n""".join(upper_files) + """\n""")
space_files = [file for file in filepaths if """ """ in file]
if space_files:
print(F'''{len(space_files)} files contain space characters:''')
print("""\n""".join(space_files) + """\n""")
hyphen_files = [file for file in filepaths if """-""" in file]
if hyphen_files:
print(F'''{len(hyphen_files)} files contain hyphen characters:''')
print("""\n""".join(hyphen_files) + """\n""")
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(F'''{len(nodir_files)} files are not in a directory:''')
print("""\n""".join(nodir_files) + """\n""")
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
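# A non-zero exit status (here, the number of offending files) makes CI fail.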
if bad_files:
import sys
sys.exit(bad_files)
| 165 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( PipelineTesterMixin , unittest.TestCase):
SCREAMING_SNAKE_CASE__ = DanceDiffusionPipeline
SCREAMING_SNAKE_CASE__ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''callback''',
'''latents''',
'''callback_steps''',
'''output_type''',
'''num_images_per_prompt''',
}
SCREAMING_SNAKE_CASE__ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
    def get_dummy_components(self ) -> Dict:
        torch.manual_seed(0 )
        unet = UNet1DModel(
            block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=True , use_timestep_embedding=False , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
        scheduler = IPNDMScheduler()
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ) -> Tuple:
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''batch_size''': 1,
            '''generator''': generator,
            '''num_inference_steps''': 4,
        }
        return inputs
    def test_dance_diffusion(self ) -> Tuple:
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        output = pipe(**inputs )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
    def test_save_load_local(self ) -> Any:
return super().test_save_load_local()
@skip_mps
    def test_dict_tuple_outputs_equivalent(self ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
    def test_save_load_optional_components(self ) -> Any:
return super().test_save_load_optional_components()
@skip_mps
    def test_attention_slicing_forward_pass(self ) -> Optional[Any]:
return super().test_attention_slicing_forward_pass()
def __A (self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase):
    def tearDown(self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A (self ) -> List[str]:
_lowercase =torch_device
_lowercase =DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
_lowercase =pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
_lowercase =torch.manual_seed(0 )
_lowercase =pipe(generator=UpperCAmelCase , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
_lowercase =output.audios
_lowercase =audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_lowercase =np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
    def test_dance_diffusion_fp16(self ) -> Optional[Any]:
        device = torch_device
        pipe = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.float16 )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        output = pipe(generator=generator , num_inference_steps=1_0_0 , audio_length_in_s=4.096 )
        audio = output.audios
        audio_slice = audio[0, -3:, -3:]
        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 5 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', F'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', F'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', F'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', F'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.weight''', F'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', F'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', F'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', F'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.weight''', F'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', F'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', F'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', F'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', F'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', F'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.sa_v_proj.bias''', F'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', F'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', F'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', F'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.ca_v_proj.bias''', F'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', F'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""),
("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""),
("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""),
("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""),
("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""),
("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""),
("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""),
("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""),
("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""),
("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""),
]
)
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = state_dict.pop(UpperCamelCase_ )
UpperCamelCase = val
def lowercase( UpperCamelCase_ ) -> Any:
'''simple docstring'''
UpperCamelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
UpperCamelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
UpperCamelCase = value
else:
UpperCamelCase = value
return new_state_dict
def lowercase( UpperCamelCase_ , UpperCamelCase_=False ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = """"""
if is_panoptic:
UpperCamelCase = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
UpperCamelCase = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:256, :]
UpperCamelCase = in_proj_bias[:256]
UpperCamelCase = in_proj_weight[256:512, :]
UpperCamelCase = in_proj_bias[256:512]
UpperCamelCase = in_proj_weight[-256:, :]
UpperCamelCase = in_proj_bias[-256:]
def lowercase( ) -> Any:
'''simple docstring'''
UpperCamelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCamelCase = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def lowercase( UpperCamelCase_ , UpperCamelCase_ ) -> Any:
'''simple docstring'''
UpperCamelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
UpperCamelCase = """resnet101"""
if "dc5" in model_name:
UpperCamelCase = True
UpperCamelCase = """panoptic""" in model_name
if is_panoptic:
UpperCamelCase = 250
else:
UpperCamelCase = 91
UpperCamelCase = """huggingface/label-files"""
UpperCamelCase = """coco-detection-id2label.json"""
UpperCamelCase = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) )
UpperCamelCase = {int(UpperCamelCase_ ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
# load image processor
UpperCamelCase = """coco_panoptic""" if is_panoptic else """coco_detection"""
UpperCamelCase = ConditionalDetrImageProcessor(format=UpperCamelCase_ )
# prepare image
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=UpperCamelCase_ , return_tensors="""pt""" )
UpperCamelCase = encoding["""pixel_values"""]
logger.info(f"""Converting model {model_name}...""" )
# load original model from torch hub
UpperCamelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , UpperCamelCase_ , pretrained=UpperCamelCase_ ).eval()
UpperCamelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
UpperCamelCase = """conditional_detr.""" + src
rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
UpperCamelCase = rename_backbone_keys(UpperCamelCase_ )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCamelCase_ , is_panoptic=UpperCamelCase_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCamelCase = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
UpperCamelCase = state_dict.pop(UpperCamelCase_ )
UpperCamelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCamelCase = state_dict.pop(UpperCamelCase_ )
UpperCamelCase = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
UpperCamelCase = state_dict.pop(UpperCamelCase_ )
UpperCamelCase = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
UpperCamelCase = state_dict.pop(UpperCamelCase_ )
UpperCamelCase = val
# finally, create HuggingFace model and load state dict
UpperCamelCase = ConditionalDetrForSegmentation(UpperCamelCase_ ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCamelCase_ )
model.load_state_dict(UpperCamelCase_ )
model.eval()
model.push_to_hub(repo_id=UpperCamelCase_ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
UpperCamelCase = conditional_detr(UpperCamelCase_ )
UpperCamelCase = model(UpperCamelCase_ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1E-4 )
# Save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
image_processor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""conditional_detr_resnet50""",
type=str,
help="""Name of the CONDITIONAL_DETR model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 343 | 0 |
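The conversion tail above builds a long list of `(src, dest)` pairs and then pops and re-adds tensors one key at a time. A minimal sketch of that renaming pattern, with a hypothetical `rename_state_dict_keys` helper and a plain float standing in for a torch tensor:

```python
from collections import OrderedDict

def rename_state_dict_keys(state_dict, rename_keys):
    # Pop each source key and re-insert its value under the target name.
    new_state_dict = OrderedDict(state_dict)
    for src, dest in rename_keys:
        if src in new_state_dict:
            new_state_dict[dest] = new_state_dict.pop(src)
    return new_state_dict

sd = {"transformer.decoder.norm.weight": 1.0}
print(rename_state_dict_keys(sd, [("transformer.decoder.norm.weight", "decoder.layernorm.weight")]))
# OrderedDict([('decoder.layernorm.weight', 1.0)])
```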
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : Tuple = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : List[Any] = 'bridgetower_vision_model'
def __init__( self , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=288 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1e-05 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = hidden_size
UpperCamelCase : str = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_channels
UpperCamelCase : Tuple = patch_size
UpperCamelCase : Any = image_size
UpperCamelCase : Union[str, Any] = initializer_factor
UpperCamelCase : Optional[int] = layer_norm_eps
UpperCamelCase : Dict = stop_gradient
UpperCamelCase : List[str] = share_layernorm
UpperCamelCase : Dict = remove_last_layer
@classmethod
def a_ ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase , UpperCamelCase : int = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if config_dict.get("""model_type""" ) == "bridgetower":
UpperCamelCase : List[str] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : int = 'bridgetower_text_model'
def __init__( self , SCREAMING_SNAKE_CASE_=5_0265 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=514 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1e-05 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ):
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = vocab_size
UpperCamelCase : Dict = hidden_size
UpperCamelCase : Dict = num_hidden_layers
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : Optional[Any] = hidden_act
UpperCamelCase : List[Any] = initializer_factor
UpperCamelCase : Tuple = intermediate_size
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : Tuple = attention_probs_dropout_prob
UpperCamelCase : Optional[Any] = max_position_embeddings
UpperCamelCase : List[Any] = type_vocab_size
UpperCamelCase : Dict = layer_norm_eps
UpperCamelCase : int = position_embedding_type
UpperCamelCase : Optional[Any] = use_cache
UpperCamelCase : Union[str, Any] = pad_token_id
UpperCamelCase : Tuple = bos_token_id
UpperCamelCase : List[Any] = eos_token_id
@classmethod
def a_ ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
UpperCamelCase , UpperCamelCase : List[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if config_dict.get("""model_type""" ) == "bridgetower":
UpperCamelCase : Optional[Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class lowerCamelCase ( _UpperCAmelCase ):
lowercase : List[str] = 'bridgetower'
def __init__( self , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=1e-05 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_="add" , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
# TODO: remove this once the Hub files are updated.
UpperCamelCase : List[str] = kwargs.pop("""text_config_dict""" , SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = kwargs.pop("""vision_config_dict""" , SCREAMING_SNAKE_CASE_ )
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : str = share_cross_modal_transformer_layers
UpperCamelCase : str = hidden_act
UpperCamelCase : Any = hidden_size
UpperCamelCase : Optional[Any] = initializer_factor
UpperCamelCase : List[Any] = layer_norm_eps
UpperCamelCase : Optional[Any] = share_link_tower_layers
UpperCamelCase : Optional[int] = link_tower_type
UpperCamelCase : Union[str, Any] = num_attention_heads
UpperCamelCase : str = num_hidden_layers
UpperCamelCase : Union[str, Any] = tie_word_embeddings
UpperCamelCase : List[str] = init_layernorm_from_vision_encoder
if text_config is None:
UpperCamelCase : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
UpperCamelCase : List[Any] = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
UpperCamelCase : Optional[Any] = BridgeTowerTextConfig(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = BridgeTowerVisionConfig(**SCREAMING_SNAKE_CASE_ )
@classmethod
def a_ ( cls , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Tuple = copy.deepcopy(self.__dict__ )
UpperCamelCase : str = self.text_config.to_dict()
UpperCamelCase : int = self.vision_config.to_dict()
UpperCamelCase : List[str] = self.__class__.model_type
return output
| 27 |
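The composite-config pattern above wires text and vision sub-configs into one top-level config and serializes them recursively via `to_dict()`. A minimal usage sketch, assuming a `transformers` install that ships BridgeTower:

```python
from transformers import BridgeTowerConfig  # assumption: BridgeTower is available

config = BridgeTowerConfig(
    text_config={"hidden_size": 768},
    vision_config={"patch_size": 16},
)
serialized = config.to_dict()
print(serialized["text_config"]["hidden_size"])   # 768
print(serialized["vision_config"]["patch_size"])  # 16
```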
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowerCamelCase ( nn.Module ):
def __init__( self , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = 88 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "geglu" , SCREAMING_SNAKE_CASE_ = None , ):
super().__init__()
UpperCamelCase : int = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=SCREAMING_SNAKE_CASE_ , attention_head_dim=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , num_layers=SCREAMING_SNAKE_CASE_ , dropout=SCREAMING_SNAKE_CASE_ , norm_num_groups=SCREAMING_SNAKE_CASE_ , cross_attention_dim=SCREAMING_SNAKE_CASE_ , attention_bias=SCREAMING_SNAKE_CASE_ , sample_size=SCREAMING_SNAKE_CASE_ , num_vector_embeds=SCREAMING_SNAKE_CASE_ , activation_fn=SCREAMING_SNAKE_CASE_ , num_embeds_ada_norm=SCREAMING_SNAKE_CASE_ , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
UpperCamelCase : Optional[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
UpperCamelCase : List[Any] = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
UpperCamelCase : int = [1, 0]
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ):
UpperCamelCase : Dict = hidden_states
UpperCamelCase : Optional[Any] = []
UpperCamelCase : List[Any] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
UpperCamelCase : Optional[int] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
UpperCamelCase : str = self.transformer_index_for_condition[i]
UpperCamelCase : Any = self.transformers[transformer_index](
SCREAMING_SNAKE_CASE_ , encoder_hidden_states=SCREAMING_SNAKE_CASE_ , timestep=SCREAMING_SNAKE_CASE_ , cross_attention_kwargs=SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
UpperCamelCase : Any = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
UpperCamelCase : List[str] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=SCREAMING_SNAKE_CASE_ )
| 27 | 1 |
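The forward pass above keeps each transformer's residual (`encoded_state - input_states`), blends the two residuals with `mix_ratio`, and adds the input back. A toy sketch of that arithmetic with plain floats standing in for hidden-state tensors:

```python
input_states = 2.0
residuals = [0.4, -0.2]  # each transformer's (encoded_state - input_states)
mix_ratio = 0.5
output_states = residuals[0] * mix_ratio + residuals[1] * (1 - mix_ratio) + input_states
print(output_states)  # ~2.1
```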
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_UpperCAmelCase = 1_6
_UpperCAmelCase = 3_2
def __magic_name__ ( lowercase , lowercase = 16 ):
SCREAMING_SNAKE_CASE_: Tuple =AutoTokenizer.from_pretrained("""bert-base-cased""" )
SCREAMING_SNAKE_CASE_: str =load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_: Optional[Any] =tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_: Dict =datasets.map(
lowercase , batched=lowercase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_: Any =tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_: List[str] =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_: Any =16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_: List[str] =8
else:
SCREAMING_SNAKE_CASE_: Any =None
return tokenizer.pad(
lowercase , padding="""longest""" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="""pt""" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_: Tuple =DataLoader(
tokenized_datasets["""train"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_UpperCAmelCase = mocked_dataloaders # noqa: F811
def __magic_name__ ( lowercase , lowercase ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""" , lowercase ) == "1":
SCREAMING_SNAKE_CASE_: Dict =2
# New Code #
SCREAMING_SNAKE_CASE_: str =int(args.gradient_accumulation_steps )
SCREAMING_SNAKE_CASE_: Tuple =int(args.local_sgd_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE_: List[str] =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_: Any =config["""lr"""]
SCREAMING_SNAKE_CASE_: List[str] =int(config["""num_epochs"""] )
SCREAMING_SNAKE_CASE_: int =int(config["""seed"""] )
SCREAMING_SNAKE_CASE_: List[Any] =int(config["""batch_size"""] )
SCREAMING_SNAKE_CASE_: int =evaluate.load("""glue""" , """mrpc""" )
set_seed(lowercase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_: List[str] =AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_: str =model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_: Any =AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_: List[str] =get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str =accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
with LocalSGD(
accelerator=lowercase , model=lowercase , local_sgd_steps=lowercase , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase ):
SCREAMING_SNAKE_CASE_: int =model(**lowercase )
SCREAMING_SNAKE_CASE_: Optional[int] =output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict =model(**lowercase )
SCREAMING_SNAKE_CASE_: str =outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any =accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE_: int =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , lowercase )
def __magic_name__ ( ):
SCREAMING_SNAKE_CASE_: str =argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowercase , default=lowercase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
# New Code #
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowercase , default=1 , help="""The number of minibatches to be ran before gradients are accumulated.""" , )
parser.add_argument(
"""--local_sgd_steps""" , type=lowercase , default=8 , help="""Number of local SGD steps or None to disable local SGD""" )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
SCREAMING_SNAKE_CASE_: Any =parser.parse_args()
SCREAMING_SNAKE_CASE_: List[Any] ={"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
| 173 |
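The training loop above only synchronizes parameters across workers every `local_sgd_steps` optimizer steps. A self-contained sketch of the same loop on a toy linear model, assuming `accelerate` and `torch` are installed:

```python
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loader = DataLoader(TensorDataset(torch.randn(32, 4), torch.randint(0, 2, (32,))), batch_size=8)
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)

with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=4, enabled=True) as local_sgd:
    for x, y in loader:
        loss = torch.nn.functional.cross_entropy(model(x), y)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        local_sgd.step()  # averages parameters across workers every 4th call
```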
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_UpperCAmelCase = re.compile(r"""([A-Z]+)([A-Z][a-z])""")
_UpperCAmelCase = re.compile(r"""([a-z\d])([A-Z])""")
_UpperCAmelCase = re.compile(r"""(?<!_)_(?!_)""")
_UpperCAmelCase = re.compile(r"""(_{2,})""")
_UpperCAmelCase = r"""^\w+(\.\w+)*$"""
_UpperCAmelCase = r"""<>:/\|?*"""
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =_uppercase_uppercase_re.sub(R"""\1_\2""" , lowercase )
SCREAMING_SNAKE_CASE_: str =_lowercase_uppercase_re.sub(R"""\1_\2""" , lowercase )
return name.lower()
def __magic_name__ ( lowercase ):
SCREAMING_SNAKE_CASE_: Optional[int] =_single_underscore_re.split(lowercase )
SCREAMING_SNAKE_CASE_: Any =[_multiple_underscores_re.split(lowercase ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowercase ) if n != """""" )
def __magic_name__ ( lowercase ):
if os.path.basename(lowercase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(lowercase )
def __magic_name__ ( lowercase , lowercase ):
if os.path.basename(lowercase ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , lowercase ):
        raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(lowercase )}-{split}'''
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None ):
SCREAMING_SNAKE_CASE_: List[Any] =filename_prefix_for_split(lowercase , lowercase )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
SCREAMING_SNAKE_CASE_: Dict =os.path.join(lowercase , lowercase )
return f'''{filepath}*'''
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase=None , lowercase=None ):
SCREAMING_SNAKE_CASE_: List[Any] =filename_prefix_for_split(lowercase , lowercase )
SCREAMING_SNAKE_CASE_: int =os.path.join(lowercase , lowercase )
if shard_lengths:
SCREAMING_SNAKE_CASE_: Any =len(lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =[f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(lowercase )]
if filetype_suffix:
SCREAMING_SNAKE_CASE_: Optional[int] =[filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
SCREAMING_SNAKE_CASE_: List[Any] =prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
| 173 | 1 |
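The helpers above produce shard file names of the form `{prefix}-{shard_id:05d}-of-{num_shards:05d}.{suffix}`. A standalone sketch of the names they generate (values are illustrative):

```python
prefix = "squad-train"  # what filename_prefix_for_split("squad", "train") would return
num_shards = 3
shards = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}.arrow" for shard_id in range(num_shards)]
print(shards)
# ['squad-train-00000-of-00003.arrow', 'squad-train-00001-of-00003.arrow', 'squad-train-00002-of-00003.arrow']
```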
"""simple docstring"""
import logging
from transformers import PretrainedConfig
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {
'''bertabs-finetuned-cnndm''': '''https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json''',
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Optional[int] = 'bertabs'
def __init__( self , lowerCamelCase__=3_0_5_2_2 , lowerCamelCase__=5_1_2 , lowerCamelCase__=6 , lowerCamelCase__=5_1_2 , lowerCamelCase__=8 , lowerCamelCase__=5_1_2 , lowerCamelCase__=0.2 , lowerCamelCase__=6 , lowerCamelCase__=7_6_8 , lowerCamelCase__=8 , lowerCamelCase__=2_0_4_8 , lowerCamelCase__=0.2 , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCamelCase = vocab_size
_lowerCamelCase = max_pos
_lowerCamelCase = enc_layers
_lowerCamelCase = enc_hidden_size
_lowerCamelCase = enc_heads
_lowerCamelCase = enc_ff_size
_lowerCamelCase = enc_dropout
_lowerCamelCase = dec_layers
_lowerCamelCase = dec_hidden_size
_lowerCamelCase = dec_heads
_lowerCamelCase = dec_ff_size
_lowerCamelCase = dec_dropout
| 360 |
"""simple docstring"""
from __future__ import annotations
def lowerCAmelCase_( lowercase_ : list , lowercase_ : int ) -> Tuple:
# Checks if the entire collection has been sorted
if len(lowercase_ ) <= 1 or n <= 1:
return
insert_next(lowercase_ , n - 1 )
rec_insertion_sort(lowercase_ , n - 1 )
def lowerCAmelCase_( lowercase_ : list , lowercase_ : int ) -> Any:
# Checks order between adjacent elements
if index >= len(lowercase_ ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
_lowerCamelCase , _lowerCamelCase = (
collection[index],
collection[index - 1],
)
insert_next(lowercase_ , index + 1 )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Optional[int] = input('''Enter integers separated by spaces: ''')
__SCREAMING_SNAKE_CASE : list[int] = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 73 | 0 |
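A quick standalone check of the recursive insertion sort above, re-stated with plain names so the snippet runs on its own:

```python
def rec_insertion_sort(collection, n):
    # Stop once the unsorted prefix is down to a single element.
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)

def insert_next(collection, index):
    # Swap adjacent out-of-order elements, then continue toward the end.
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    collection[index - 1], collection[index] = collection[index], collection[index - 1]
    insert_next(collection, index + 1)

nums = [5, 2, 9, 1]
rec_insertion_sort(nums, len(nums))
print(nums)  # [1, 2, 5, 9]
```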
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def __get__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ) -> List[str]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('''unreadable attribute''' )
UpperCamelCase :Any = '''__cached_''' + self.fget.__name__
UpperCamelCase :str = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if cached is None:
UpperCamelCase :Any = self.fget(SCREAMING_SNAKE_CASE_ )
setattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return cached
def _A ( SCREAMING_SNAKE_CASE__ : Dict ):
UpperCamelCase :Optional[int] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(F'''invalid truth value {val!r}''' )
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if is_torch_fx_proxy(SCREAMING_SNAKE_CASE__ ):
return True
if is_torch_available():
import torch
if isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(SCREAMING_SNAKE_CASE__ , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(SCREAMING_SNAKE_CASE__ , (jnp.ndarray, Tracer) ):
return True
return isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] ):
return isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray )
def _A ( SCREAMING_SNAKE_CASE__ : Dict ):
return _is_numpy(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
import torch
return isinstance(SCREAMING_SNAKE_CASE__ , torch.Tensor )
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
return False if not is_torch_available() else _is_torch(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
import torch
return isinstance(SCREAMING_SNAKE_CASE__ , torch.device )
def _A ( SCREAMING_SNAKE_CASE__ : Any ):
return False if not is_torch_available() else _is_torch_device(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] ):
import torch
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if hasattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :Dict = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
return False
return isinstance(SCREAMING_SNAKE_CASE__ , torch.dtype )
def _A ( SCREAMING_SNAKE_CASE__ : int ):
return False if not is_torch_available() else _is_torch_dtype(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : List[str] ):
import tensorflow as tf
return isinstance(SCREAMING_SNAKE_CASE__ , tf.Tensor )
def _A ( SCREAMING_SNAKE_CASE__ : str ):
return False if not is_tf_available() else _is_tensorflow(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(SCREAMING_SNAKE_CASE__ , '''is_symbolic_tensor''' ):
return tf.is_symbolic_tensor(SCREAMING_SNAKE_CASE__ )
return type(SCREAMING_SNAKE_CASE__ ) == tf.Tensor
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
return False if not is_tf_available() else _is_tf_symbolic_tensor(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
import jax.numpy as jnp # noqa: F811
return isinstance(SCREAMING_SNAKE_CASE__ , jnp.ndarray )
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
return False if not is_flax_available() else _is_jax(SCREAMING_SNAKE_CASE__ )
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
if isinstance(SCREAMING_SNAKE_CASE__ , (dict, UserDict) ):
return {k: to_py_obj(SCREAMING_SNAKE_CASE__ ) for k, v in obj.items()}
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
return [to_py_obj(SCREAMING_SNAKE_CASE__ ) for o in obj]
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
return obj.numpy().tolist()
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return np.asarray(SCREAMING_SNAKE_CASE__ ).tolist()
elif isinstance(SCREAMING_SNAKE_CASE__ , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def _A ( SCREAMING_SNAKE_CASE__ : Tuple ):
if isinstance(SCREAMING_SNAKE_CASE__ , (dict, UserDict) ):
return {k: to_numpy(SCREAMING_SNAKE_CASE__ ) for k, v in obj.items()}
elif isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ):
return np.array(SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
return obj.numpy()
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return np.asarray(SCREAMING_SNAKE_CASE__ )
else:
return obj
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> List[str]:
UpperCamelCase :Any = fields(self )
# Safety and consistency checks
if not len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(F'''{self.__class__.__name__} has no fields.''' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'''{self.__class__.__name__} should not have more than one required field.''' )
UpperCamelCase :Optional[int] = getattr(self , class_fields[0].name )
UpperCamelCase :str = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :List[Any] = first_field.items()
UpperCamelCase :Dict = True
else:
try:
UpperCamelCase :Any = iter(SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[Any] = True
except TypeError:
UpperCamelCase :List[Any] = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(SCREAMING_SNAKE_CASE_ ):
if (
not isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) )
or not len(SCREAMING_SNAKE_CASE_ ) == 2
or not isinstance(element[0] , SCREAMING_SNAKE_CASE_ )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
UpperCamelCase :Union[str, Any] = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'''Cannot set key/value for {element}. It needs to be a tuple (key, value).''' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
UpperCamelCase :str = element[1]
elif first_field is not None:
UpperCamelCase :Optional[Any] = first_field
else:
for field in class_fields:
UpperCamelCase :List[Any] = getattr(self , field.name )
if v is not None:
UpperCamelCase :List[str] = v
def __delitem__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
raise Exception(F'''You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.''' )
def UpperCAmelCase ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
raise Exception(F'''You cannot use ``setdefault`` on a {self.__class__.__name__} instance.''' )
def UpperCAmelCase ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
raise Exception(F'''You cannot use ``pop`` on a {self.__class__.__name__} instance.''' )
def UpperCAmelCase ( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Tuple:
raise Exception(F'''You cannot use ``update`` on a {self.__class__.__name__} instance.''' )
def __getitem__( self , SCREAMING_SNAKE_CASE_ ) -> Any:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase :Union[str, Any] = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
super().__setattr__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def __setitem__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
# Will raise a KeyException if needed
super().__setitem__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class UpperCAmelCase_ ( lowercase, lowercase ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE_ ) -> int:
raise ValueError(
F'''{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}''' )
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Tuple ='longest'
UpperCamelCase_ : Any ='max_length'
UpperCamelCase_ : Optional[int] ='do_not_pad'
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Any ='pt'
UpperCamelCase_ : Optional[Any] ='tf'
UpperCamelCase_ : str ='np'
UpperCamelCase_ : List[str] ='jax'
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> Dict:
UpperCamelCase :Union[str, Any] = context_managers
UpperCamelCase :str = ExitStack()
def __enter__( self ) -> List[str]:
for context_manager in self.context_managers:
self.stack.enter_context(SCREAMING_SNAKE_CASE_ )
def __exit__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
self.stack.__exit__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _A ( SCREAMING_SNAKE_CASE__ : List[str] ):
UpperCamelCase :Any = infer_framework(SCREAMING_SNAKE_CASE__ )
if framework == "tf":
UpperCamelCase :Union[str, Any] = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCamelCase :Any = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCamelCase :Optional[int] = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def _A ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
UpperCamelCase :List[Any] = model_class.__name__
UpperCamelCase :Optional[int] = infer_framework(SCREAMING_SNAKE_CASE__ )
if framework == "tf":
UpperCamelCase :Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
UpperCamelCase :Dict = inspect.signature(model_class.forward ) # PyTorch models
else:
UpperCamelCase :Tuple = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def _A ( SCREAMING_SNAKE_CASE__ : MutableMapping , SCREAMING_SNAKE_CASE__ : str = "" , SCREAMING_SNAKE_CASE__ : str = "." ):
def _flatten_dict(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str="" , SCREAMING_SNAKE_CASE__ : Optional[Any]="." ):
for k, v in d.items():
UpperCamelCase :Optional[int] = str(SCREAMING_SNAKE_CASE__ ) + delimiter + str(SCREAMING_SNAKE_CASE__ ) if parent_key else k
if v and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
yield from flatten_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , delimiter=SCREAMING_SNAKE_CASE__ ).items()
else:
yield key, v
return dict(_flatten_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
@contextmanager
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool = False ):
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Tuple=None ):
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.transpose(SCREAMING_SNAKE_CASE__ , axes=SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.T if axes is None else array.permute(*SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.transpose(SCREAMING_SNAKE_CASE__ , perm=SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return jnp.transpose(SCREAMING_SNAKE_CASE__ , axes=SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(F'''Type not supported for transpose: {type(SCREAMING_SNAKE_CASE__ )}.''' )
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Dict ):
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.reshape(*SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return jnp.reshape(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(F'''Type not supported for reshape: {type(SCREAMING_SNAKE_CASE__ )}.''' )
def _A ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any=None ):
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.squeeze(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.squeeze() if axis is None else array.squeeze(dim=SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.squeeze(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return jnp.squeeze(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(F'''Type not supported for squeeze: {type(SCREAMING_SNAKE_CASE__ )}.''' )
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.expand_dims(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.unsqueeze(dim=SCREAMING_SNAKE_CASE__ )
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.expand_dims(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return jnp.expand_dims(SCREAMING_SNAKE_CASE__ , axis=SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(F'''Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE__ )}.''' )
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
if is_numpy_array(SCREAMING_SNAKE_CASE__ ):
return np.size(SCREAMING_SNAKE_CASE__ )
elif is_torch_tensor(SCREAMING_SNAKE_CASE__ ):
return array.numel()
elif is_tf_tensor(SCREAMING_SNAKE_CASE__ ):
import tensorflow as tf
return tf.size(SCREAMING_SNAKE_CASE__ )
elif is_jax_tensor(SCREAMING_SNAKE_CASE__ ):
return array.size
else:
raise ValueError(F'''Type not supported for expand_dims: {type(SCREAMING_SNAKE_CASE__ )}.''' )
def _A ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
for key, value in auto_map.items():
if isinstance(SCREAMING_SNAKE_CASE__ , (tuple, list) ):
UpperCamelCase :Optional[Any] = [F'''{repo_id}--{v}''' if (v is not None and '''--''' not in v) else v for v in value]
elif value is not None and "--" not in value:
UpperCamelCase :List[str] = F'''{repo_id}--{value}'''
return auto_map
def _A ( SCREAMING_SNAKE_CASE__ : str ):
for base_class in inspect.getmro(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase :List[str] = base_class.__module__
UpperCamelCase :str = base_class.__name__
if module.startswith('''tensorflow''' ) or module.startswith('''keras''' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('''torch''' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('''flax''' ) or module.startswith('''jax''' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(F'''Could not infer framework from class {model_class}.''' )
| 259 |
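The `ModelOutput` base class above supports attribute, key, and positional access at once. A minimal sketch with a hypothetical subclass, assuming `transformers` is installed:

```python
from dataclasses import dataclass
from typing import Optional
from transformers.utils import ModelOutput

@dataclass
class ToyOutput(ModelOutput):  # hypothetical subclass for illustration
    logits: Optional[list] = None
    loss: Optional[float] = None

out = ToyOutput(logits=[0.1, 0.9], loss=0.25)
print(out["logits"], out.loss, out[0])  # key, attribute, and positional access
print(out.to_tuple())                   # ([0.1, 0.9], 0.25)
```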
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""",
}
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Dict ='git_vision_model'
def __init__( self , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=224 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_="quick_gelu" , SCREAMING_SNAKE_CASE_=1e-5 , SCREAMING_SNAKE_CASE_=0.0 , SCREAMING_SNAKE_CASE_=0.02 , **SCREAMING_SNAKE_CASE_ , ) -> Tuple:
super().__init__(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = hidden_size
UpperCamelCase :Union[str, Any] = intermediate_size
UpperCamelCase :Dict = num_hidden_layers
UpperCamelCase :int = num_attention_heads
UpperCamelCase :List[str] = num_channels
UpperCamelCase :Optional[int] = patch_size
UpperCamelCase :Optional[int] = image_size
UpperCamelCase :List[Any] = initializer_range
UpperCamelCase :Union[str, Any] = attention_dropout
UpperCamelCase :Tuple = layer_norm_eps
UpperCamelCase :Optional[Any] = hidden_act
@classmethod
def UpperCAmelCase ( cls , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE_ )
UpperCamelCase , UpperCamelCase :Dict = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
UpperCamelCase :Tuple = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Optional[Any] ='git'
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=3_0522 , SCREAMING_SNAKE_CASE_=768 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=3072 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=1024 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=1e-12 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=101 , SCREAMING_SNAKE_CASE_=102 , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> int:
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
if vision_config is None:
UpperCamelCase :Tuple = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
UpperCamelCase :Union[str, Any] = GitVisionConfig(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase :Optional[int] = vocab_size
UpperCamelCase :Optional[Any] = hidden_size
UpperCamelCase :List[Any] = num_hidden_layers
UpperCamelCase :List[Any] = num_attention_heads
UpperCamelCase :Dict = hidden_act
UpperCamelCase :List[str] = intermediate_size
UpperCamelCase :List[str] = hidden_dropout_prob
UpperCamelCase :Optional[int] = attention_probs_dropout_prob
UpperCamelCase :Optional[Any] = max_position_embeddings
UpperCamelCase :Tuple = initializer_range
UpperCamelCase :Any = layer_norm_eps
UpperCamelCase :int = position_embedding_type
UpperCamelCase :Dict = use_cache
UpperCamelCase :Tuple = tie_word_embeddings
UpperCamelCase :Union[str, Any] = num_image_with_embedding
UpperCamelCase :Optional[int] = bos_token_id
UpperCamelCase :List[Any] = eos_token_id
def UpperCAmelCase ( self ) -> Optional[int]:
UpperCamelCase :Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCamelCase :Optional[int] = self.vision_config.to_dict()
UpperCamelCase :int = self.__class__.model_type
return output
| 259 | 1 |
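As with the vision/text configs earlier, `GitConfig` instantiates its nested vision config from a plain dict and serializes it back through `to_dict()`. A short sketch, assuming a `transformers` version that ships GIT:

```python
from transformers import GitConfig  # assumption: GIT is available in this install

cfg = GitConfig(vision_config={"image_size": 224, "patch_size": 16})
print(type(cfg.vision_config).__name__)              # GitVisionConfig
print(cfg.to_dict()["vision_config"]["patch_size"])  # 16
```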
'''simple docstring'''
import random
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[Any] = num - 1
UpperCAmelCase : Optional[int] = 0
while s % 2 == 0:
UpperCAmelCase : Any = s // 2
t += 1
for _ in range(5 ):
UpperCAmelCase : Any = random.randrange(2 , num - 1 )
UpperCAmelCase : Union[str, Any] = pow(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if v != 1:
UpperCAmelCase : List[Any] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
UpperCAmelCase : str = i + 1
UpperCAmelCase : Optional[Any] = (v**2) % num
return True
def UpperCamelCase( UpperCAmelCase_ ):
if num < 2:
return False
    UpperCAmelCase : Optional[Any] = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
        157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
        239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
        331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
        421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
        509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
        613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
        709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
        821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
        919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(lowerCAmelCase__ )
def UpperCamelCase( UpperCAmelCase_ = 10_24 ):
while True:
UpperCAmelCase : Union[str, Any] = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(lowerCAmelCase__ ):
return num
if __name__ == "__main__":
lowercase__ = generate_large_prime()
print(("Prime number:", num))
print(("is_prime_low_num:", is_prime_low_num(num)))
| 360 |
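A standalone restatement of the Miller-Rabin round used above, runnable on its own; the test is probabilistic, so a composite can in principle slip through, but five random bases make that very unlikely:

```python
import random

def rabin_miller(num):
    # Write num - 1 as s * 2**t with s odd.
    s, t = num - 1, 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(5):  # five independent random bases
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != num - 1:
                if i == t - 1:
                    return False  # a is a witness: num is composite
                i += 1
                v = (v * v) % num
    return True  # probably prime

print(rabin_miller(97))  # True (97 is prime)
print(rabin_miller(91))  # False with high probability (91 = 7 * 13)
```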
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowercase__ = re.compile(r"([A-Z]+)([A-Z][a-z])")
lowercase__ = re.compile(r"([a-z\d])([A-Z])")
lowercase__ = re.compile(r"(?<!_)_(?!_)")
lowercase__ = re.compile(r"(_{2,})")
lowercase__ = r"^\w+(\.\w+)*$"
lowercase__ = r"<>:/\|?*"
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[str] = _uppercase_uppercase_re.sub(R'\1_\2' , UpperCAmelCase_ )
UpperCAmelCase : str = _lowercase_uppercase_re.sub(R'\1_\2' , UpperCAmelCase_ )
return name.lower()
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[str] = _single_underscore_re.split(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = [_multiple_underscores_re.split(UpperCAmelCase_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(UpperCAmelCase_ ) if n != '' )
def UpperCamelCase( UpperCAmelCase_ ):
if os.path.basename(UpperCAmelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if os.path.basename(UpperCAmelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , UpperCAmelCase_ ):
raise ValueError(F"""Split name should match '{_split_re}'' but got '{split}'.""" )
return F"""{filename_prefix_for_name(UpperCAmelCase_ )}-{split}"""
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ):
UpperCAmelCase : Dict = filename_prefix_for_split(UpperCAmelCase_ , UpperCAmelCase_ )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
UpperCAmelCase : Optional[int] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
return F"""{filepath}*"""
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None ):
UpperCAmelCase : Optional[int] = filename_prefix_for_split(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
if shard_lengths:
UpperCAmelCase : List[str] = len(UpperCAmelCase_ )
UpperCAmelCase : List[str] = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(UpperCAmelCase_ )]
if filetype_suffix:
UpperCAmelCase : Dict = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
UpperCAmelCase : Optional[Any] = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
| 280 | 0 |
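The split-name validation above enforces the `^\w+(\.\w+)*$` pattern. A standalone sketch of what passes and what fails:

```python
import re

_split_re = r"^\w+(\.\w+)*$"
for split in ("train", "validation.clean", "bad split!"):
    print(split, bool(re.match(_split_re, split)))
# train True
# validation.clean True
# bad split! False
```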
'''simple docstring'''
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = GPTSwaTokenizer
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
def A ( self : int ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase = GPTSwaTokenizer(UpperCamelCase__ , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def A ( self : List[Any] , UpperCamelCase__ : Optional[int] ):
"""simple docstring"""
UpperCamelCase = 'This is a test'
UpperCamelCase = 'This is a test'
return input_text, output_text
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = '<s>'
UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(UpperCamelCase__ ) , 2_0_0_0 )
def A ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0 )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = GPTSwaTokenizer(UpperCamelCase__ )
UpperCamelCase = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase__ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2] )
UpperCamelCase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
UpperCamelCase__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
UpperCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
self.assertListEqual(
UpperCamelCase__ , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , )
UpperCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
# fmt: off
self.assertListEqual(
UpperCamelCase__ , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = GPTSwaTokenizer(UpperCamelCase__ )
UpperCamelCase = ['This is a test', 'I was born in 92000, and this is falsé.']
UpperCamelCase = [
[4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2],
[2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertListEqual(tokenizer.encode_fast(UpperCamelCase__ ) , UpperCamelCase__ )
# Test that decode_fast returns the input text
for text, token_ids in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertEqual(tokenizer.decode_fast(UpperCamelCase__ ) , UpperCamelCase__ )
@slow
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
UpperCamelCase = {'input_ids': [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name='AI-Sweden/gpt-sw3-126m' , sequences=UpperCamelCase__ , )
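# Why 'falsé' ends in '<0xC3>', '<0xA9>' and '9' becomes '<0x39>' in the
# expectations above: SentencePiece byte fallback spells out the UTF-8
# bytes of pieces missing from the vocabulary. A plain-Python check:
assert "é".encode("utf-8") == b"\xc3\xa9"
assert "9".encode("utf-8") == b"\x39"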
| 28 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowerCAmelCase = """true"""
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : int=8_2 , snake_case_ : Optional[Any]=1_6 ) ->Dict:
set_seed(4_2 )
lowerCamelCase__ : List[Any] =RegressionModel()
lowerCamelCase__ : List[Any] =deepcopy(snake_case_ )
lowerCamelCase__ : List[str] =RegressionDataset(length=snake_case_ )
lowerCamelCase__ : Any =DataLoader(snake_case_ , batch_size=snake_case_ )
model.to(accelerator.device )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.prepare(snake_case_ , snake_case_ )
return model, ddp_model, dataloader
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : str=False ) ->List[str]:
lowerCamelCase__ : int =AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
lowerCamelCase__ : List[Any] =load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case_ : Optional[Any] ):
lowerCamelCase__ : Optional[int] =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
with accelerator.main_process_first():
lowerCamelCase__ : Tuple =dataset.map(
snake_case_ , batched=snake_case_ , remove_columns=['idx', 'sentence1', 'sentence2'] , )
lowerCamelCase__ : List[Any] =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case_ : Union[str, Any] ):
if use_longest:
return tokenizer.pad(snake_case_ , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case_ , padding='max_length' , max_length=1_2_8 , return_tensors='pt' )
return DataLoader(snake_case_ , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=1_6 )
def lowerCAmelCase_ ( snake_case_ : List[str] , snake_case_ : Tuple ) ->Any:
lowerCamelCase__ : Optional[int] =Accelerator(dispatch_batches=snake_case_ , split_batches=snake_case_ )
lowerCamelCase__ : List[Any] =get_dataloader(snake_case_ , not dispatch_batches )
lowerCamelCase__ : Union[str, Any] =AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Dict =accelerator.prepare(snake_case_ , snake_case_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : List[str] ) ->Dict:
lowerCamelCase__ : Optional[Any] =[]
for batch in dataloader:
lowerCamelCase__ , lowerCamelCase__ : int =batch.values()
with torch.no_grad():
lowerCamelCase__ : Optional[Any] =model(snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =[], []
for logit, targ in logits_and_targets:
logits.append(snake_case_ )
targs.append(snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =torch.cat(snake_case_ ), torch.cat(snake_case_ )
return logits, targs
def lowerCAmelCase_ ( snake_case_ : Accelerator , snake_case_ : Optional[int]=8_2 , snake_case_ : Any=False , snake_case_ : List[Any]=False , snake_case_ : Optional[int]=1_6 ) ->List[str]:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =get_basic_setup(snake_case_ , snake_case_ , snake_case_ )
lowerCamelCase__ , lowerCamelCase__ : Any =generate_predictions(snake_case_ , snake_case_ , snake_case_ )
assert (
len(snake_case_ ) == num_samples
), f"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case_ )}"""
def lowerCAmelCase_ ( snake_case_ : bool = False , snake_case_ : bool = False ) ->str:
lowerCamelCase__ : Dict =evaluate.load('glue' , 'mrpc' )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =get_mrpc_setup(snake_case_ , snake_case_ )
# First do baseline
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Dict =setup['no']
model.to(snake_case_ )
model.eval()
for batch in dataloader:
batch.to(snake_case_ )
with torch.inference_mode():
lowerCamelCase__ : Any =model(**snake_case_ )
lowerCamelCase__ : List[str] =outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case_ , references=batch['labels'] )
lowerCamelCase__ : Optional[Any] =metric.compute()
# Then do distributed
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Optional[int] =setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
lowerCamelCase__ : List[Any] =model(**snake_case_ )
lowerCamelCase__ : str =outputs.logits.argmax(dim=-1 )
lowerCamelCase__ : int =batch['labels']
lowerCamelCase__ , lowerCamelCase__ : List[Any] =accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case_ , references=snake_case_ )
lowerCamelCase__ : List[str] =metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def lowerCAmelCase_ ( ) ->str:
lowerCamelCase__ : List[str] =Accelerator(split_batches=snake_case_ , dispatch_batches=snake_case_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case_ , snake_case_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
lowerCamelCase__ : Dict =Accelerator(split_batches=snake_case_ , dispatch_batches=snake_case_ )
if accelerator.is_local_main_process:
print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case_ , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
lowerCamelCase__ : List[Any] =Accelerator()
test_torch_metrics(snake_case_ , 5_1_2 )
accelerator.state._reset_state()
def lowerCAmelCase_ ( snake_case_ : List[Any] ) ->Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 126 | 0 |
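# A minimal sketch (not accelerate's implementation) of the problem the
# gather_for_metrics calls above solve: distributed samplers pad the last
# batch so every process gets equal work, and those padded duplicates must
# be dropped before computing metrics.
def _trim_padding(gathered_samples, true_dataset_length):
    # keep only the first `true_dataset_length` rows after gathering
    return gathered_samples[:true_dataset_length]
# e.g. 82 real samples gathered from 2 processes as 84 padded rows:
assert len(_trim_padding(list(range(84)), 82)) == 82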
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
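# Downstream usage sketch (assumed, not part of this __init__): consumers
# reach the pipeline through the diffusers top-level namespace, e.g.
#   from diffusers import ShapEPipeline
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e")
# while the dummy-object import above turns a missing torch/transformers
# install into a descriptive error at use time instead of an ImportError.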
| 143 | # Lint as: python3
import itertools
import os
import re
UpperCamelCase__ = re.compile(R'([A-Z]+)([A-Z][a-z])')
UpperCamelCase__ = re.compile(R'([a-z\d])([A-Z])')
UpperCamelCase__ = re.compile(R'(?<!_)_(?!_)')
UpperCamelCase__ = re.compile(R'(_{2,})')
UpperCamelCase__ = R'^\w+(\.\w+)*$'
UpperCamelCase__ = R'<>:/\|?*'
def lowerCAmelCase_ ( __A ) -> Dict:
'''simple docstring'''
UpperCAmelCase__ = _uppercase_uppercase_re.sub(r"\1_\2", __A )
UpperCAmelCase__ = _lowercase_uppercase_re.sub(r"\1_\2", __A )
return name.lower()
def lowerCAmelCase_ ( __A ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ = _single_underscore_re.split(__A )
UpperCAmelCase__ = [_multiple_underscores_re.split(__A ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(__A ) if n != "" )
def lowerCAmelCase_ ( __A ) -> List[str]:
'''simple docstring'''
if os.path.basename(__A ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(__A )
def lowerCAmelCase_ ( __A, __A ) -> Optional[int]:
'''simple docstring'''
if os.path.basename(__A ) != name:
raise ValueError(f"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re, __A ):
raise ValueError(f"""Split name should match '{_split_re}'' but got '{split}'.""" )
return f"""{filename_prefix_for_name(__A )}-{split}"""
def lowerCAmelCase_ ( __A, __A, __A, __A=None ) -> str:
'''simple docstring'''
UpperCAmelCase__ = filename_prefix_for_split(__A, __A )
if filetype_suffix:
prefix += f""".{filetype_suffix}"""
UpperCAmelCase__ = os.path.join(__A, __A )
return f"""{filepath}*"""
def lowerCAmelCase_ ( __A, __A, __A, __A=None, __A=None ) -> Any:
'''simple docstring'''
UpperCAmelCase__ = filename_prefix_for_split(__A, __A )
UpperCAmelCase__ = os.path.join(__A, __A )
if shard_lengths:
UpperCAmelCase__ = len(__A )
UpperCAmelCase__ = [f"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(__A )]
if filetype_suffix:
UpperCAmelCase__ = [filename + f""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
UpperCAmelCase__ = prefix
if filetype_suffix:
filename += f""".{filetype_suffix}"""
return [filename]
| 143 | 1 |
"""simple docstring"""
def snake_case_ ( A_ : int, A_ : int, A_ : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
_lowerCamelCase : Optional[int] = _modexpt(A_, exponent // 2, A_ ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(A_, exponent - 1, A_ )) % modulo_value
def snake_case_ ( A_ : int = 17_77, A_ : int = 18_55, A_ : int = 8 ):
'''simple docstring'''
_lowerCamelCase : int = base
for _ in range(1, A_ ):
_lowerCamelCase : Optional[Any] = _modexpt(A_, A_, 10**digits )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def __lowerCAmelCase ( lowercase : List[str] ) -> str:
"""simple docstring"""
snake_case : Optional[int] = botoa.client("iam" )
snake_case : Any = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=lowercase , AssumeRolePolicyDocument=json.dumps(lowercase , indent=2 ) )
snake_case : Union[str, Any] = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=lowercase , PolicyName=F'{role_name}_policy_permission' , PolicyDocument=json.dumps(lowercase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'role {role_name} already exists. Using existing one' )
def __lowerCAmelCase ( lowercase : Dict ) -> Optional[int]:
"""simple docstring"""
snake_case : Any = botoa.client("iam" )
return iam_client.get_role(RoleName=lowercase )["Role"]["Arn"]
def __lowerCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
snake_case : Optional[int] = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , lowercase , )
snake_case : int = None
if credentials_configuration == 0:
snake_case : Any = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
snake_case : List[str] = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
snake_case : Any = _ask_field("AWS Access Key ID: " )
snake_case : List[str] = aws_access_key_id
snake_case : Optional[int] = _ask_field("AWS Secret Access Key: " )
snake_case : Union[str, Any] = aws_secret_access_key
snake_case : Optional[Any] = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
snake_case : List[str] = aws_region
snake_case : List[str] = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , lowercase , )
if role_management == 0:
snake_case : Tuple = _ask_field("Enter your IAM role name: " )
else:
snake_case : Union[str, Any] = "accelerate_sagemaker_execution_role"
print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(lowercase )
snake_case : Union[str, Any] = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
snake_case : Any = None
if is_custom_docker_image:
snake_case : Union[str, Any] = _ask_field("Enter your Docker image: " , lambda lowercase : str(lowercase ).lower() )
snake_case : List[Any] = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
snake_case : List[str] = None
if is_sagemaker_inputs_enabled:
snake_case : Dict = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda lowercase : str(lowercase ).lower() , )
snake_case : Tuple = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
snake_case : int = None
if is_sagemaker_metrics_enabled:
snake_case : int = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda lowercase : str(lowercase ).lower() , )
snake_case : str = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
snake_case : Tuple = {}
snake_case : Any = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
if use_dynamo:
snake_case : Any = "dynamo_"
snake_case : Optional[int] = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
snake_case : Optional[int] = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
if use_custom_options:
snake_case : Dict = _ask_options(
"Which mode do you want to use?" , lowercase , lambda lowercase : TORCH_DYNAMO_MODES[int(lowercase )] , default="default" , )
snake_case : Union[str, Any] = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
snake_case : Dict = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=lowercase , error_message="Please enter yes or no." , )
snake_case : List[str] = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
snake_case : str = _ask_options(
lowercase , lowercase , lambda lowercase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(lowercase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
snake_case : Union[str, Any] = _ask_field(lowercase , lambda lowercase : str(lowercase ).lower() , default="ml.p3.2xlarge" )
snake_case : Any = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
snake_case : Dict = _ask_field(
"How many machines do you want use? [1]: " , lowercase , default=1 , )
snake_case : Union[str, Any] = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=lowercase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=lowercase , use_cpu=lowercase , dynamo_config=lowercase , eca_instance_type=lowercase , profile=lowercase , region=lowercase , iam_role_name=lowercase , mixed_precision=lowercase , num_machines=lowercase , sagemaker_inputs_file=lowercase , sagemaker_metrics_file=lowercase , )
| 203 | 0 |
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict ):
'''simple docstring'''
lowercase_ = 0
lowercase_ = len(__lowerCamelCase )
for i in range(n - 1 ):
for j in range(i + 1 , __lowerCamelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict ):
'''simple docstring'''
if len(__lowerCamelCase ) <= 1:
return arr, 0
lowercase_ = len(__lowerCamelCase ) // 2
lowercase_ = arr[0:mid]
lowercase_ = arr[mid:]
lowercase_ , lowercase_ = count_inversions_recursive(__lowerCamelCase )
lowercase_ , lowercase_ = count_inversions_recursive(__lowerCamelCase )
lowercase_ , lowercase_ = _count_cross_inversions(__lowerCamelCase , __lowerCamelCase )
lowercase_ = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: int , __lowerCamelCase: Dict ):
'''simple docstring'''
lowercase_ = []
lowercase_ = lowercase_ = lowercase_ = 0
while i < len(__lowerCamelCase ) and j < len(__lowerCamelCase ):
if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(__lowerCamelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(__lowerCamelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
lowercase_ = count_inversions_bf(__lowerCamelCase )
lowercase_ , lowercase_ = count_inversions_recursive(__lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print("number of inversions = " , __lowerCamelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
lowercase_ = count_inversions_bf(__lowerCamelCase )
lowercase_ , lowercase_ = count_inversions_recursive(__lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , __lowerCamelCase )
# an empty list should also have zero inversions
lowercase_ = []
lowercase_ = count_inversions_bf(__lowerCamelCase )
lowercase_ , lowercase_ = count_inversions_recursive(__lowerCamelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print("number of inversions = " , __lowerCamelCase )
if __name__ == "__main__":
main()
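# Independent cross-check (not from the source): counting order-reversed
# pairs directly with itertools agrees with the expected total of 8 for
# the array used in main above.
from itertools import combinations
assert sum(a > b for a, b in combinations([10, 2, 1, 5, 5, 2, 11], 2)) == 8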
| 297 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __lowerCamelCase ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = StableUnCLIPImgaImgPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = 32
lowercase_ = embedder_hidden_size
# image encoding components
lowercase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ = StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
torch.manual_seed(0 )
lowercase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ = AutoencoderKL()
lowercase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=True ) -> Tuple:
'''simple docstring'''
if str(UpperCAmelCase ).startswith("mps" ):
lowercase_ = torch.manual_seed(UpperCAmelCase )
else:
lowercase_ = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
if pil_image:
lowercase_ = input_image * 0.5 + 0.5
lowercase_ = input_image.clamp(0 , 1 )
lowercase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ = DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
lowercase_ = sd_pipe.to(UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase_ = self.get_dummy_inputs(UpperCAmelCase )
inputs.update({"image_embeds": None} )
lowercase_ = sd_pipe(**UpperCAmelCase ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> int:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
lowercase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase_ = pipe(UpperCAmelCase , "anime turle" , generator=UpperCAmelCase , output_type="np" )
lowercase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> int:
'''simple docstring'''
lowercase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
lowercase_ = pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ = pipe(
UpperCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
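# A sketch of the pixel-difference check used above (the real
# assert_mean_pixel_difference comes from ..test_pipelines_common and may
# differ), assuming float-castable arrays and a tolerance of 10 levels:
def _mean_pixel_difference_sketch(image, expected, tolerance=10):
    diff = np.abs(image.astype(np.float32) - expected.astype(np.float32)).mean()
    assert diff < tolerance, f"mean pixel difference {diff} exceeds {tolerance}"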
| 297 | 1 |
import os
import numpy
import onnx
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ ) -> Any:
_lowercase : Optional[Any] = a.name
_lowercase : Dict = b.name
_lowercase : List[str] = ''
_lowercase : int = ''
_lowercase : Optional[int] = a == b
_lowercase : str = name_a
_lowercase : Optional[int] = name_b
return res
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Union[str, Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(lowerCamelCase_ , lowerCamelCase_ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase_ , lowerCamelCase_ )
_graph_replace_input_with(node_proto.attribute[1].g , lowerCamelCase_ , lowerCamelCase_ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> int:
for n in graph_proto.node:
_node_replace_input_with(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> Optional[int]:
_lowercase : List[str] = list(model.graph.initializer )
_lowercase : str = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
_lowercase : List[Any] = inits[i].name
_lowercase : Optional[int] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
_lowercase : str = os.path.dirname(lowerCamelCase_ )
_lowercase : Tuple = os.path.basename(lowerCamelCase_ )
_lowercase : Optional[int] = onnx.load(os.path.join(lowerCamelCase_ , lowerCamelCase_ ) )
_lowercase : Optional[int] = list(model.graph.initializer )
_lowercase : Dict = set()
_lowercase : Optional[int] = {}
_lowercase : Union[str, Any] = []
_lowercase : Optional[int] = 0
for i in range(len(lowerCamelCase_ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(lowerCamelCase_ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(lowerCamelCase_ )
dup_set.add(lowerCamelCase_ )
_lowercase : Optional[int] = inits[j].data_type
_lowercase : Optional[int] = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , lowerCamelCase_ )
total_reduced_size += mem_size
_lowercase : str = inits[i].name
_lowercase : List[str] = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(lowerCamelCase_ )
else:
_lowercase : Tuple = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
_lowercase : int = sorted(lowerCamelCase_ )
_remove_dup_initializers_from_model(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Tuple = 'optimized_' + model_file_name
_lowercase : int = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
onnx.save(lowerCamelCase_ , lowerCamelCase_ )
return new_model
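# The dtype branches above map ONNX TensorProto element types to byte
# widths; assuming the standard enum values (1=FLOAT, 6=INT32, 7=INT64,
# 11=DOUBLE), the same computation as a lookup table:
_ELEM_BYTES = {1: 4, 6: 4, 7: 8, 11: 8}
def _tensor_mem_size_sketch(dims, data_type):
    # element count times per-element width, mirroring the loop above
    return int(numpy.prod(dims)) * _ELEM_BYTES.get(data_type, 0)
assert _tensor_mem_size_sketch([2, 3], 7) == 48  # six int64 elements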
| 21 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class _lowerCamelCase( _a ):
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Tuple = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowerCamelCase, 'width_multiplier'))
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=64, lowerCamelCase=2, lowerCamelCase=3, lowerCamelCase="swish", lowerCamelCase=3, lowerCamelCase=32, lowerCamelCase=0.1, lowerCamelCase=0.0_2, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=10, lowerCamelCase=None, lowerCamelCase=0.2_5, lowerCamelCase=0.0, lowerCamelCase=0.0, ) -> Any:
"""simple docstring"""
_lowercase : Any = parent
_lowercase : Optional[int] = batch_size
_lowercase : Dict = image_size
_lowercase : str = patch_size
_lowercase : Optional[int] = num_channels
_lowercase : Optional[Any] = make_divisible(5_12 * width_multiplier, divisor=8)
_lowercase : str = hidden_act
_lowercase : Dict = conv_kernel_size
_lowercase : int = output_stride
_lowercase : Optional[Any] = classifier_dropout_prob
_lowercase : Tuple = use_labels
_lowercase : int = is_training
_lowercase : Optional[Any] = num_labels
_lowercase : Dict = initializer_range
_lowercase : List[str] = scope
_lowercase : Tuple = width_multiplier
_lowercase : List[str] = ffn_dropout
_lowercase : Dict = attn_dropout
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowercase : Dict = None
_lowercase : Optional[int] = None
if self.use_labels:
_lowercase : Optional[Any] = ids_tensor([self.batch_size], self.num_labels)
_lowercase : str = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
_lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return MobileViTVaConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout_prob, attn_dropout=self.attn_dropout_prob, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
_lowercase : int = self.num_labels
_lowercase : Optional[int] = MobileViTVaForImageClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> int:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : Union[str, Any] = MobileViTVaForSemanticSegmentation(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
_lowercase : List[Any] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(
result.logits.shape, (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase , _lowercase : int = config_and_inputs
_lowercase : List[str] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowercase_ : Dict = (
{
"""feature-extraction""": MobileViTVaModel,
"""image-classification""": MobileViTVaForImageClassification,
"""image-segmentation""": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowercase_ : List[Any] = False
lowercase_ : Optional[int] = False
lowercase_ : List[Any] = False
lowercase_ : Tuple = False
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : Union[str, Any] = MobileViTVaModelTester(self)
_lowercase : Tuple = MobileViTVaConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViTV2 does not use inputs_embeds')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not support input and output embeddings')
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViTV2 does not output attentions')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.')
def UpperCamelCase ( self) -> int:
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
pass
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : List[Any] = model_class(lowerCamelCase)
_lowercase : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : Any = [*signature.parameters.keys()]
_lowercase : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1], lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase):
_lowercase : Optional[Any] = model_class(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
with torch.no_grad():
_lowercase : Optional[int] = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase))
_lowercase : List[Any] = outputs.hidden_states
_lowercase : Tuple = 5
self.assertEqual(len(lowerCamelCase), lowerCamelCase)
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_lowercase : Optional[int] = 2
for i in range(len(lowerCamelCase)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2)
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Tuple = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Optional[Any] = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase)
@slow
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : str = MobileViTVaModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
def UpperCamelCase_( ) -> Dict:
_lowercase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class _lowerCamelCase( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
return (
MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256')
if is_vision_available()
else None
)
@slow
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : List[str] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256').to(
lowerCamelCase)
_lowercase : Dict = self.default_image_processor
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Dict = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Tuple = model(**lowerCamelCase)
# verify the logits
_lowercase : Optional[int] = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape, lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([-1.63_36E00, -7.32_04E-02, -5.18_83E-01]).to(lowerCamelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Optional[int] = model.to(lowerCamelCase)
_lowercase : Optional[int] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Union[str, Any] = prepare_img()
_lowercase : Tuple = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : List[Any] = model(**lowerCamelCase)
_lowercase : str = outputs.logits
# verify the logits
_lowercase : Tuple = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape, lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
], device=lowerCamelCase, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], lowerCamelCase, atol=1E-4))
@slow
def UpperCamelCase ( self) -> str:
"""simple docstring"""
_lowercase : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : Tuple = model.to(lowerCamelCase)
_lowercase : str = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3')
_lowercase : int = prepare_img()
_lowercase : Dict = image_processor(images=lowerCamelCase, return_tensors='pt').to(lowerCamelCase)
# forward pass
with torch.no_grad():
_lowercase : Union[str, Any] = model(**lowerCamelCase)
_lowercase : Any = outputs.logits.detach().cpu()
_lowercase : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase, target_sizes=[(50, 60)])
_lowercase : Any = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape, lowerCamelCase)
_lowercase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase)
_lowercase : Optional[int] = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape, lowerCamelCase)
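# make_divisible (imported near the top of this file) rounds channel
# counts to hardware-friendly widths; a hypothetical minimal version
# following the MobileNet rounding rule, which may differ from
# transformers' exact implementation:
def _make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    # never round more than 10% below the requested width
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value
assert _make_divisible_sketch(512 * 0.25) == 128  # the width_multiplier=0.25 case above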
| 21 | 1 |
def __UpperCAmelCase ( __a : Tuple ) -> str:
"""simple docstring"""
if not all(char in '''01''' for char in bin_string ):
raise ValueError('''Non-binary value was passed to the function''' )
if not bin_string:
raise ValueError('''Empty string was passed to the function''' )
_a : Optional[int] = ''''''
while len(__a ) % 3 != 0:
_a : int = '''0''' + bin_string
_a : Optional[Any] = [
bin_string[index : index + 3]
for index in range(len(__a ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
_a : Optional[Any] = 0
for index, val in enumerate(__a ):
oct_val += int(2 ** (2 - index) * int(__a ) )
oct_string += str(__a )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
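# Cross-check with Python builtins (assuming the converter above keeps its
# original name bin_to_octal): int(s, 2) parses binary and format(..., "o")
# renders octal without the "0o" prefix, e.g.
#   bin_to_octal("1111") == format(int("1111", 2), "o") == "17"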
| 351 |
from math import ceil
def __UpperCAmelCase ( __a : int = 1_001 ) -> int:
"""simple docstring"""
_a : Dict = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
_a : int = 2 * i + 1
_a : str = 2 * i
_a : Any = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
a__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
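# Worked check (not from the source): for a 5x5 number spiral the diagonal
# values are 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, and the
# closed-form update above reproduces that: solution(5) == 101.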
| 15 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
__lowerCamelCase : Optional[int] = HfArgumentParser(lowerCamelCase__ )
__lowerCamelCase : Dict = parser.parse_args_into_dataclasses()[0]
__lowerCamelCase : Any = TensorFlowBenchmark(args=lowerCamelCase__ )
try:
__lowerCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__lowerCamelCase : str = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
__lowerCamelCase : Dict = ' '.join(str(lowerCamelCase__ ).split(' ' )[:-1] )
__lowerCamelCase : List[Any] = ''
__lowerCamelCase : Dict = eval(str(lowerCamelCase__ ).split(' ' )[-1] )
__lowerCamelCase : Optional[int] = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
__lowerCamelCase : Tuple = full_error_msg + begin_error_msg + str(lowerCamelCase__ )
raise ValueError(lowerCamelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
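# The deprecation shim above only rewrites the flag prefix; for the real
# deprecated flag "no_multi_process" the emitted message reads:
assert "Arg --no_{0} is no longer used, please use --no-{0} instead.".format(
    "multi_process"
) == "Arg --no_multi_process is no longer used, please use --no-multi_process instead."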
| 73 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__snake_case =random.Random()
if is_torch_available():
import torch
def a_ ( lowerCamelCase : Dict , lowerCamelCase : Dict=1.0 , lowerCamelCase : List[Any]=None , lowerCamelCase : Union[str, Any]=None ):
if rng is None:
lowerCAmelCase = global_rng
lowerCAmelCase = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]=7 , UpperCAmelCase__ : int=4_0_0 , UpperCAmelCase__ : int=2_0_0_0 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=1_6_0_0_0 , UpperCAmelCase__ : Tuple=True , UpperCAmelCase__ : Union[str, Any]=True , ) -> Any:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = min_seq_length
lowerCAmelCase = max_seq_length
lowerCAmelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase = feature_size
lowerCAmelCase = padding_value
lowerCAmelCase = sampling_rate
lowerCAmelCase = return_attention_mask
lowerCAmelCase = do_normalize
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Union[str, Any]=False ) -> Optional[Any]:
def _flatten(UpperCAmelCase__ : int ):
return list(itertools.chain(*UpperCAmelCase__ ) )
if equal_length:
lowerCAmelCase = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase = [np.asarray(UpperCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
lowerCamelCase : Dict = ASTFeatureExtractor
def __UpperCAmelCase ( self : str ) -> Optional[int]:
lowerCAmelCase = ASTFeatureExtractionTester(self )
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
@require_torch
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_values': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]
    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors='pt').input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
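# --- Added illustrative sketch (not part of the original test file; names are assumptions) ---
# Minimal usage of the extractor exercised above: one second of 16 kHz audio
# yields the (1, 1024, 128) log-mel tensor that test_integration asserts on.
#
#   raw_audio = floats_list((1, 16000))[0]
#   extractor = ASTFeatureExtractor()
#   features = extractor(raw_audio, sampling_rate=16000, return_tensors='pt').input_values
#   assert features.shape == (1, 1024, 128)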
| 4 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []
    def get_position(self, vertex):
        return self.node_position[vertex]
    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input('''Enter number of edges: ''').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
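# --- Added illustrative sketch (not part of the original file) ---
# Example run of prisms_algorithm on a triangle graph with weights
# 0-1: 1, 1-2: 2, 0-2: 3, in the adjacency-list format the __main__ block builds:
#
#   graph = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[1, 2], [0, 3]]}
#   assert prisms_algorithm(graph) == [(0, 1), (1, 2)]  # MST keeps the two cheapest edges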
| 309 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
'split_dict' , [
SplitDict(),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 , dataset_name='my_dataset' )} ),
SplitDict({'train': SplitInfo(name='train' , num_bytes=1337 , num_examples=42 )} ),
SplitDict({'train': SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    'split_info', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='my_dataset')])
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({'train': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
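# --- Added illustrative sketch (not part of the original test file) ---
# The YAML round trip exercised above, on a hand-built SplitDict:
#
#   sd = SplitDict({'train': SplitInfo(name='train', num_bytes=1337, num_examples=42)})
#   yaml_list = sd._to_yaml_list()            # one plain dict per split, no 'dataset_name'
#   reloaded = SplitDict._from_yaml_list(yaml_list)
#   assert reloaded['train'].num_examples == 42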
| 309 | 1 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
    def test_betas(self):
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
    def test_schedules(self):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
    def test_prediction_type(self):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
| 162 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["""labels"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["""input_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""token_type_ids"""] = inputs_dict["""labels"""]
                inputs_dict["""mc_token_ids"""] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["""mc_labels"""] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 162 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''')
        texts = '''今天天气真好!'''
        jieba_tokens = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = '''今天天气真好!'''
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)
        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 370 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class AcceleratorTester(AccelerateTestCase):
    @require_cuda
    def test_accelerator_can_be_reinstantiated(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError):
            _ = Accelerator(cpu=True)
    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)
    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)
    def test_env_var_device(self):
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass
        with patch('''torch.cuda.set_device''', noop), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64'''):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), '''cuda:64''')
    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)
        # saving hook
        def save_config(models, weights, output_dir):
            config = {'''class_name''': models[0].__class__.__name__}
            with open(os.path.join(output_dir, '''data.json'''), '''w''') as f:
                json.dump(config, f)
        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, '''data.json'''), '''r''') as f:
                config = json.load(f)
            models[0].class_name = config['''class_name''']
        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # random class name to verify correct one is loaded
            model.class_name = '''random'''
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)
            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
            # random class name to verify correct one is loaded
            model.class_name = '''random'''
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)
    def test_accelerator_none(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
        self.assertTrue(dummy_obj is None)
    def test_is_accelerator_prepared(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
        self.assertEqual(
            getattr(dummy_obj, '''_is_accelerate_prepared''', False), False, '''Dummy object should have `_is_accelerate_prepared` set to `True`''', )
        self.assertEqual(
            getattr(model, '''_is_accelerate_prepared''', False), True, '''Model is missing `_is_accelerator_prepared` or is set to `False`''', )
        self.assertEqual(
            getattr(optimizer, '''_is_accelerate_prepared''', False), True, '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''', )
        self.assertEqual(
            getattr(scheduler, '''_is_accelerate_prepared''', False), True, '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''', )
        self.assertEqual(
            getattr(train_dl, '''_is_accelerate_prepared''', False), True, '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''', )
        self.assertEqual(
            getattr(valid_dl, '''_is_accelerate_prepared''', False), True, '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''', )
    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''', load_in_8bit=True, device_map={'''''': 0}, )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model)
    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        from transformers import AutoModelForCausalLM
        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''', )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["""lm_head"""] = '''cpu'''
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''', device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)
        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        from transformers import AutoModelForCausalLM
        PartialState._shared_state = {'''distributed_type''': DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''', )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["""lm_head"""] = 1
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''', load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()
        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)
        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                '''EleutherAI/gpt-neo-125m''', )
        device_map = infer_auto_device_map(model)
        device_map["""lm_head"""] = 1
        model = AutoModelForCausalLM.from_pretrained(
            '''EleutherAI/gpt-neo-125m''', load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model)
    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
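# --- Added illustrative sketch (not part of the original test file) ---
# Hedged example of the pattern these tests exercise: preparing the toy components
# and running one training step. The reshape is only there to feed the scalar
# batches from create_components() into the Linear(2, 4) model.
#
#   accelerator = Accelerator()
#   model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(*create_components())
#   for (batch,) in train_dl:
#       loss = model(batch.float().reshape(1, -1).repeat(1, 2)).sum()
#       accelerator.backward(loss)
#       optimizer.step()
#       optimizer.zero_grad()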
| 24 | 0 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = '''sample'''
    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}
    @property
    def input_shape(self):
        return (3, 32, 32)
    @property
    def output_shape(self):
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature(self):
        pass
    def test_training(self):
        pass
    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained('''fusing/vqgan-dummy''', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['''missing_keys''']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        model = VQModel.from_pretrained('''fusing/vqgan-dummy''')
        model.to(torch_device).eval()
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 303 |
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = F"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise | 269 | 0 |
"""simple docstring"""
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]
    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="""pt""").input_features
    def forward(self, inputs):
        return self.model.generate(inputs=inputs)
    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
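# --- Added illustrative sketch (not part of the original file) ---
# Hedged usage example: PipelineTool.__call__ chains encode -> forward -> decode,
# so the tool can be invoked directly on a waveform (`audio` is a placeholder):
#
#   tool = SpeechToTextTool()
#   transcript = tool(audio)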
| 364 |
"""simple docstring"""
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("""keep_in_memory""", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            """dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    """features""", [
        None,
        {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
        {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
        {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
        {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
    ], )
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    default_expected_features = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("""SELECT * FROM dataset""")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / """cache"""
    output_sqlite_path = os.path.join(cache_dir, """tmp.sql""")
    dataset = SqlDatasetReader("""dataset""", """sqlite:///""" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, """dataset""", """sqlite:///""" + output_sqlite_path, num_proc=0).write()
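# --- Added illustrative sketch (not part of the original test file) ---
# The reader/writer round trip these tests cover, with placeholder paths:
#
#   ds = SqlDatasetReader('dataset', 'sqlite:///' + sqlite_path, cache_dir=cache_dir).read()
#   SqlDatasetWriter(ds, 'dataset', 'sqlite:///' + output_path, num_proc=1).write()
#   # rows written to `output_path` should match `sqlite_path` row for row,
#   # which is what iter_sql_file() is used to verify above.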
| 56 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)})
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    doc_stride: int = field(
        default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, )
    max_query_length: int = field(
        default=64, metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        }, )
    max_answer_length: int = field(
        default=30, metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"})
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    lang_id: int = field(
        default=0, metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        }, )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, is_language_sensitive=False, cache_dir=None, dataset_format="pt", ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, F'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}', )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    F'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]')
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
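# --- Added illustrative sketch (not part of the original file; checkpoint and paths are placeholders) ---
# Hedged construction example for the dataset class above:
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
#   args = SquadDataTrainingArguments(model_type='bert', data_dir='path/to/squad')
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   inputs = train_dataset[0]   # dict with input_ids / attention_mask / positions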
| 85 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 | 0 |
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), """num.txt""")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
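# --- Added illustrative sanity check (not part of the solution file) ---
# The same "sum everything, then keep the first ten digits" idea on two short
# inline numbers instead of the num.txt file:
#
#   nums = ['37107287533902102798797998220837590246510135740250',
#           '46376937677490009712648124896970078050417018260538']
#   assert str(sum(int(n) for n in nums))[:10] == '8348422521'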
| 359 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 347 | 0 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(vocab_keys ) , 1000 )
    def test_vocab_size( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
    def test_rust_and_python_full_tokenizers( self ):
        """simple docstring"""
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
    def test_padding( self , max_length=15 ):
        """simple docstring"""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
def _snake_case ( self ):
"""simple docstring"""
pass
    def test_full_tokenizer( self ):
        """simple docstring"""
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("This is a test" )
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [285, 46, 10, 170, 382] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
    def big_tokenizer( self ):
        """simple docstring"""
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
    def test_tokenization_base_easy_symbols( self ):
        """simple docstring"""
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@slow
    def test_tokenization_base_hard_symbols( self ):
        """simple docstring"""
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings , self.big_tokenizer.encode(symbols ) )
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model( self ):
        """simple docstring"""
        import torch
        from transformers import ReformerConfig, ReformerModel
        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys() )[:10]
        sequence = " ".join(first_ten_tokens )
        encoded_sequence = self.big_tokenizer.encode_plus(sequence , return_tensors="pt" )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config )
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence )
            model(**batch_encoded_sequence )
@slow
    def test_tokenizer_integration( self ):
"""simple docstring"""
lowerCamelCase : Tuple = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=False , sequences=sequences , )
| 283 |
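A hedged usage sketch for the tokenizer tested above, comparing the slow (sentencepiece) and fast (tokenizers) implementations on the same checkpoint; it needs network access plus the sentencepiece and tokenizers packages:

from transformers import ReformerTokenizer, ReformerTokenizerFast

slow = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
fast = ReformerTokenizerFast.from_pretrained("google/reformer-crime-and-punishment")
text = "Hello World!"
# the two implementations should agree on tokens and ids
assert slow.tokenize(text) == fast.tokenize(text)
assert slow.encode(text) == fast.encode(text)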
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")
    def __init__( self , image_processor , tokenizer ):
        """simple docstring"""
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        """simple docstring"""
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 283 | 1 |
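A hedged usage sketch for the processor above; the argument order (images, then text) follows the __call__ signature, and the checkpoint name is an assumption that requires network access, Pillow and torch:

import requests
from PIL import Image
from transformers import BridgeTowerProcessor

processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")  # assumed checkpoint
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
# one call tokenizes the text and extracts pixel features into a single BatchEncoding
encoding = processor(image, "two cats lying on a couch", return_tensors="pt")
print(sorted(encoding.keys()))  # expect input_ids, attention_mask, pixel_values, pixel_mask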
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation( img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int ) -> np.ndarray:
    rotation_matrix = cv2.getAffineTransform(pt1, pt2 )
    return cv2.warpAffine(img, rotation_matrix, (rows, cols) )
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 363 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys( state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder" ):
            key = key.replace("module.encoder", "glpn.encoder" )
        if key.startswith("module.decoder" ):
            key = key.replace("module.decoder", "decoder.stages" )
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed" ) + len("patch_embed" )]
            key = key.replace(f"""patch_embed{idx}""", f"""patch_embeddings.{int(idx )-1}""" )
        if "norm" in key:
            key = key.replace("norm", "layer_norm" )
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm" ) + len("glpn.encoder.layer_norm" )]
            key = key.replace(f"""layer_norm{idx}""", f"""layer_norm.{int(idx )-1}""" )
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1" )
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2" )
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block" ) + len("block" )]
            key = key.replace(f"""block{idx}""", f"""block.{int(idx )-1}""" )
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query" )
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense" )
        if "attn" in key:
            key = key.replace("attn", "attention.self" )
        if "fc1" in key:
            key = key.replace("fc1", "dense1" )
        if "fc2" in key:
            key = key.replace("fc2", "dense2" )
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier" )
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse" )
            key = key.replace("linear_fuse.bn", "batch_norm" )
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c" ) + len("linear_c" )]
            key = key.replace(f"""linear_c{idx}""", f"""linear_c.{int(idx )-1}""" )
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution" )
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution" )
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution" )
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion" )
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion" )
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion" )
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer" )
        if key.startswith("module.last_layer_depth" ):
            key = key.replace("module.last_layer_depth", "head.head" )
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v( state_dict, config ):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks ):
        for j in range(config.depths[i] ):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
            kv_bias = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
            # next, add keys and values (in that order) to the state dict
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.weight"""] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.key.bias"""] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.weight"""] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"""glpn.encoder.block.{i}.{j}.attention.self.value.bias"""] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True ).raw )
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None ):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3] )
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt" ).pixel_values
    logger.info("Converting model..." )
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu" ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config )
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # forward pass
    outputs = model(pixel_values )
    predicted_depth = outputs.predicted_depth
# verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
        else:
            raise ValueError(f"""Unknown model name: {model_name}""" )
        expected_shape = torch.Size([1, 480, 640] )
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4 )
        print("Looks ok!" )
# finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub..." )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name ), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name ), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 17 | 0 |
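A generic, self-contained sketch of the two-step state-dict surgery the script above performs: rename keys with string replacements, then split one fused matrix into separate entries. The key names and shapes here are illustrative, not GLPN's real ones:

import torch

state_dict = {"module.encoder.block1.attn.kv.weight": torch.randn(128, 64)}
# step 1: rename keys from the original layout to the target layout
renamed = {}
for key, value in state_dict.items():
    new_key = key.replace("module.encoder", "glpn.encoder").replace("block1", "block.0")
    renamed[new_key] = value
# step 2: split the fused kv matrix along dim 0 into key and value halves
kv = renamed.pop("glpn.encoder.block.0.attn.kv.weight")
hidden = kv.shape[0] // 2
renamed["glpn.encoder.block.0.attn.key.weight"] = kv[:hidden, :]
renamed["glpn.encoder.block.0.attn.value.weight"] = kv[hidden:, :]
print(sorted(renamed))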
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_timesformer""": ["""TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TimesformerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"""TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimesformerModel""",
"""TimesformerForVideoClassification""",
"""TimesformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 297 |
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("""_T""")
class QueueByTwoStacks( Generic[_T] ):
    """simple docstring"""
    def __init__( self , iterable = None) -> None:
        '''simple docstring'''
        self._stack1 = list(iterable or [])
        self._stack2 = []
    def __len__( self) -> int:
        '''simple docstring'''
        return len(self._stack1) + len(self._stack2)
    def __repr__( self) -> str:
        '''simple docstring'''
        return F'Queue({tuple(self._stack2[::-1] + self._stack1)})'
    def put( self , item) -> None:
        '''simple docstring'''
        self._stack1.append(item)
    def get( self) -> _T:
        '''simple docstring'''
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError('Queue is empty')
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 99 | 0 |
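Usage sketch for the queue above (relying on the put/get names used in the class): each element crosses from stack one to stack two at most once, so a sequence of n operations does O(n) total work, i.e. amortized O(1) per operation.

queue = QueueByTwoStacks([1, 2, 3])
queue.put(4)
assert queue.get() == 1  # first get drains stack one into stack two, then pops
assert queue.get() == 2  # served straight from stack two
queue.put(5)
assert [queue.get() for _ in range(3)] == [3, 4, 5]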
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast ( PreTrainedTokenizerFast ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get('lowercase' , do_lower_case) != do_lower_case
            or pre_tok_state.get('strip_accents' , strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers , pre_tok_state.pop('type'))
            pre_tok_state['lowercase'] = do_lower_case
            pre_tok_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def __getstate__( self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d):
        '''simple docstring'''
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix)
        return tuple(files)
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        '''simple docstring'''
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs)
| 356 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 48 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config ,input_ids ,decoder_input_ids ,attention_mask=None ,decoder_attention_mask=None ,head_mask=None ,decoder_head_mask=None ,cross_attn_head_mask=None ,) -> Tuple:
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id ,1 ,0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id ,1 ,0 )
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=32 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , initializer_range=0.02 , ) -> int:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs( self ) -> int:
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.int64 )) , -1 )
        decoder_input_ids = shift_tokens_right(input_ids , 1 , 2 )
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=False , )
        inputs_dict = prepare_blenderbot_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
    def prepare_config_and_inputs_for_common( self ) -> str:
        config , inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward( self , model_class_name , config , inputs_dict ) -> List[str]:
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , decoder_attention_mask=decoder_attention_mask , past_key_values=outputs_cache.past_key_values , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
    def check_use_cache_forward_with_attn_mask( self , model_class_name , config , inputs_dict ) -> Optional[Any]:
        max_decoder_length = 20
        model = model_class_name(config )
        encoder_outputs = model.encode(inputs_dict['input_ids'] )
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict['decoder_input_ids'],
            inputs_dict['decoder_attention_mask'],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        past_key_values = model.init_cache(decoder_input_ids.shape[0] , max_decoder_length , encoder_outputs )
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1] , encoder_outputs , decoder_attention_mask=decoder_attention_mask_cache , past_key_values=past_key_values , decoder_position_ids=decoder_position_ids , )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:] , encoder_outputs , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=decoder_attention_mask_cache , decoder_position_ids=decoder_position_ids , )
        outputs = model.decode(decoder_input_ids , encoder_outputs , decoder_attention_mask=decoder_attention_mask )
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
@require_flax
class BlenderbotSmallHeadTests( unittest.TestCase ):
    """simple docstring"""
    vocab_size = 99
    def _get_config_and_data( self ) -> str:
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.int64 , )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    def test_lm_forward( self ) -> Optional[int]:
        config , input_ids , batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        outputs = lm_model(input_ids=input_ids )
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )
    def test_lm_uneven_forward( self ) -> int:
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config )
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.int64 )
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.int64 )
        outputs = lm_model(input_ids=context , decoder_input_ids=summary )
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['logits'].shape , expected_shape )
    def test_shift_tokens_right( self ) -> Union[str, Any]:
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.int64 )
        shifted = shift_tokens_right(input_ids , 1 , 2 )
        n_pad_before = np.equal(input_ids , 1 ).astype(np.float32 ).sum()
        n_pad_after = np.equal(shifted , 1 ).astype(np.float32 ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(n_pad_after , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotSmallModelTest ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
"""simple docstring"""
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp( self ) -> Union[str, Any]:
        self.model_tester = FlaxBlenderbotSmallModelTester(self )
    def test_use_cache_forward( self ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class , config , inputs_dict )
    def test_use_cache_forward_with_attn_mask( self ) -> Optional[int]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class , config , inputs_dict )
    def test_encode( self ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )
                @jax.jit
                def encode_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model.encode(input_ids=input_ids , attention_mask=attention_mask )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
    def test_decode( self ) -> List[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                model = model_class(config )
                encoder_outputs = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
                prepared_inputs_dict = {
                    'decoder_input_ids': inputs_dict['decoder_input_ids'],
                    'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
                    'encoder_outputs': encoder_outputs,
                }
                @jax.jit
                def decode_jitted(decoder_input_ids , decoder_attention_mask , encoder_outputs ):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids , decoder_attention_mask=decoder_attention_mask , encoder_outputs=encoder_outputs , )
                with self.subTest('JIT Enabled' ):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
    def test_model_from_pretrained( self ) -> int:
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('facebook/blenderbot_small-90M' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1) ) * model.config.eos_token_id
            outputs = model(input_ids )
            self.assertIsNotNone(outputs )
| 208 |
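A self-contained numpy sketch of the shift_tokens_right behaviour exercised above: every token moves one slot right and the decoder start token fills column 0. This mirrors the flax helper's logic, not its exact implementation:

import numpy as np

def shift_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # masked (-100) positions, if any, are turned back into padding
    return np.where(shifted == -100, pad_token_id, shifted)

ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
print(shift_right(ids, pad_token_id=1, decoder_start_token_id=2))  # [[ 2 71 82 18 33  2  1]]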
'''simple docstring'''
def bin_to_octal( bin_string: str ) -> str:
    if not all(char in '01' for char in bin_string ):
        raise ValueError('Non-binary value was passed to the function' )
    if not bin_string:
        raise ValueError('Empty string was passed to the function' )
    oct_string = ''
    while len(bin_string ) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 208 | 1 |
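Two quick checks of the conversion above: "1111" is padded to "001111" and the groups 001 and 111 map to octal digits 1 and 7, i.e. 0b1111 = 0o17; likewise 0b101010101 = 0o525.

assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010101") == "525"
print(oct(int("101010101", 2)))  # 0o525, cross-check with the builtins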
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
A : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( model_name , pytorch_dump_folder_path , base_model=True ):
    '''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main" , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 369 |
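A self-contained sketch of the q/k/v split done in read_in_q_k_v above: timm stores one fused in-projection of shape (3 * hidden, hidden) that gets cut into query, key and value blocks. The hidden size here is illustrative:

import torch

hidden = 8
in_proj_weight = torch.randn(3 * hidden, hidden)
query_w = in_proj_weight[:hidden, :]
key_w = in_proj_weight[hidden : 2 * hidden, :]
value_w = in_proj_weight[-hidden:, :]
# the three blocks reassemble exactly into the fused matrix
assert torch.equal(torch.cat([query_w, key_w, value_w]), in_proj_weight)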
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime( number: int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def prime_generator() -> Iterator[int]:
    '''simple docstring'''
    num = 2
    while True:
        if is_prime(num ):
            yield num
        num += 1
def solution( limit: int = 200_0000 ) -> int:
    '''simple docstring'''
    return sum(takewhile(lambda x: x < limit , prime_generator() ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 259 | 0 |
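Quick check of the generator + takewhile pattern above: the primes below 10 are 2, 3, 5 and 7, which sum to 17.

from itertools import islice
print(list(islice(prime_generator(), 5)))  # [2, 3, 5, 7, 11]
assert solution(10) == 17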
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class RagTokenizer:
    '''simple docstring'''
    def __init__( self , question_encoder , generator ):
        """simple docstring"""
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        """simple docstring"""
        if os.path.isfile(save_directory ):
            raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , """question_encoder_tokenizer""" )
        generator_path = os.path.join(save_directory , """generator_tokenizer""" )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """simple docstring"""
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop("""config""" , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder="""generator_tokenizer""" )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        """simple docstring"""
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        """simple docstring"""
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
        """simple docstring"""
        warnings.warn(
            """`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
            """regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
            """context manager to prepare your targets. See the documentation of your specific tokenizer for more """
            """details""" , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs["""labels"""] = labels["""input_ids"""]
        return model_inputs
| 291 |
'''simple docstring'''
def solution( n: int = 100 ) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 42 | 0 |
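Worked check for n = 10: the square of the sum is 55**2 = 3025, the sum of the squares is 385, and the difference is 2640.

assert solution(10) == 2640
assert sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640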
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray , pt1: np.ndarray , pt2: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    '''simple docstring'''
    rotation_matrix = cv2.getAffineTransform(pt1 , pt2 )
    return cv2.warpAffine(img , rotation_matrix , (rows, cols) )
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 104 |
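Minimal sketch of the cv2 affine API used above: three source points and their three images fully determine the 2x3 affine matrix. Requires opencv-python; the input image here is synthetic:

import cv2
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)
src = np.array([[50, 50], [150, 50], [50, 150]], np.float32)
dst = np.array([[60, 40], [160, 60], [40, 160]], np.float32)
matrix = cv2.getAffineTransform(src, dst)  # shape (2, 3)
warped = cv2.warpAffine(img, matrix, (200, 200))
print(matrix.shape, warped.shape)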
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor ):
    model_input_names = ["""input_features"""]
    def __init__( self , feature_size=80 , sampling_rate=1_60_00 , hop_length=1_60 , chunk_length=30 , n_fft=4_00 , padding_value=0.0 , return_attention_mask=False , **kwargs , ) -> Optional[int]:
        super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=sampling_rate , norm='slaney' , mel_scale='slaney' , )
    def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
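    # Note (added): in _np_extract_fbank_features, clamping to (max - 8.0) caps
    # the dynamic range at 80 dB, and (log_spec + 4.0) / 4.0 rescales typical
    # speech roughly into [-1, 1], the input range the Whisper encoder expects.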
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(self, raw_speech, truncation=True, pad_to_multiple_of=None, return_tensors=None,
                 return_attention_mask=None, padding="max_length", max_length=None, sampling_rate=None,
                 do_normalize=None, **kwargs) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
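# A minimal usage sketch (added; it assumes the class restored above). Thirty
# seconds of 16 kHz audio map to a (1, 80, 3000) log-mel feature tensor, since
# 30 * 16000 samples / hop_length 160 = 3000 frames over 80 mel bins:
#     extractor = WhisperFeatureExtractor()
#     silence = np.zeros(30 * 16000, dtype=np.float32)
#     features = extractor(silence, sampling_rate=16000, return_tensors="np")
#     print(features["input_features"].shape)  # (1, 80, 3000)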
| 104 | 1 |
'''simple docstring'''
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
UpperCamelCase__ = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 181 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
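# A minimal usage sketch (added; the checkpoint name and image variable are
# assumptions, not taken from this file):
#     from PIL import Image
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     image = Image.open("cat.png")
#     inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")
#     # inputs now holds input_ids / attention_mask / token_type_ids plus pixel_values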
| 235 | 0 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Recursively try every knight move, backtracking on dead ends."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution to the open knight's tour problem on an n x n board."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
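# Quick examples (added): a 1x1 board is trivially solvable, while a knight has
# no legal move on a 2x2 board.
#     open_knight_tour(1)  # [[1]]
#     open_knight_tour(2)  # raises ValueError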
if __name__ == "__main__":
import doctest
doctest.testmod() | 33 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Tensor protos compare equal only when every field matches, so temporarily
    # blank out the names before comparing the raw contents.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # rewire every node that consumed the removed initializer
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model | 33 | 1 |
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
A__ = {}
def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Tuple:
'''simple docstring'''
if self.graph.get(lowercase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A__ = [[w, v]]
if not self.graph.get(lowercase ):
A__ = []
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return list(self.graph )
def UpperCamelCase ( self , lowercase , lowercase ) -> int:
'''simple docstring'''
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any:
'''simple docstring'''
if s == d:
return []
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
# check if we have reached the starting point
if len(lowercase ) == 0:
return visited
def UpperCamelCase ( self , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
if c == -1:
A__ = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def UpperCamelCase ( self , lowercase=-2 ) -> Any:
'''simple docstring'''
A__ = deque()
A__ = []
if s == -2:
A__ = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
A__ = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def UpperCamelCase ( self , lowercase ) -> int:
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase ( self , lowercase=-2 ) -> str:
'''simple docstring'''
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
A__ = []
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
# check if we have reached the starting point
if len(lowercase ) == 0:
return sorted_nodes
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
# check if we have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
# check if se have reached the starting point
if len(lowercase ) == 0:
return False
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Any:
'''simple docstring'''
A__ = time()
self.dfs(lowercase , lowercase )
A__ = time()
return end - begin
def UpperCamelCase ( self , lowercase=-2 ) -> int:
'''simple docstring'''
A__ = time()
self.bfs(lowercase )
A__ = time()
return end - begin
class Graph:
"""simple docstring"""
def __init__( self ) -> int:
'''simple docstring'''
A__ = {}
def UpperCamelCase ( self , lowercase , lowercase , lowercase=1 ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(lowercase ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A__ = [[w, v]]
# add the other way
if self.graph.get(lowercase ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
A__ = [[w, u]]
def UpperCamelCase ( self , lowercase , lowercase ) -> Union[str, Any]:
'''simple docstring'''
if self.graph.get(lowercase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(lowercase )
# the other way round
if self.graph.get(lowercase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(lowercase )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> List[str]:
'''simple docstring'''
if s == d:
return []
A__ = []
A__ = []
if s == -2:
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = s
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(lowercase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = ss
# check if we have reached the starting point
if len(lowercase ) == 0:
return visited
def UpperCamelCase ( self , lowercase=-1 ) -> str:
'''simple docstring'''
if c == -1:
A__ = floor(random() * 10000 ) + 10
for i in range(lowercase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A__ = floor(random() * c ) + 1
if n != i:
self.add_pair(lowercase , lowercase , 1 )
def UpperCamelCase ( self , lowercase=-2 ) -> Dict:
'''simple docstring'''
A__ = deque()
A__ = []
if s == -2:
A__ = list(self.graph )[0]
d.append(lowercase )
visited.append(lowercase )
while d:
A__ = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def UpperCamelCase ( self , lowercase ) -> Tuple:
'''simple docstring'''
return len(self.graph[u] )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
# check if we have reached the starting point
if len(lowercase ) == 0:
return list(lowercase )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = []
A__ = []
A__ = list(self.graph )[0]
stack.append(lowercase )
visited.append(lowercase )
A__ = -2
A__ = []
A__ = s
A__ = False
A__ = set()
while True:
# check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A__ = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A__ = len(lowercase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A__ = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A__ = True
if len(lowercase ) != 0:
A__ = stack[len(lowercase ) - 1]
else:
A__ = False
indirect_parents.append(lowercase )
A__ = s
A__ = ss
# check if we have reached the starting point
if len(lowercase ) == 0:
return False
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
return list(self.graph )
def UpperCamelCase ( self , lowercase=-2 , lowercase=-1 ) -> Optional[Any]:
'''simple docstring'''
A__ = time()
self.dfs(lowercase , lowercase )
A__ = time()
return end - begin
def UpperCamelCase ( self , lowercase=-2 ) -> List[Any]:
'''simple docstring'''
A__ = time()
self.bfs(lowercase )
A__ = time()
return end - begin
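# A minimal usage sketch (added). It assumes the obfuscated method names above
# are restored to add_pair / dfs / bfs, as their bodies and comments suggest;
# those names are otherwise hypothetical:
#     g = DirectedGraph()
#     g.add_pair(0, 1)
#     g.add_pair(1, 2)
#     g.add_pair(2, 0)
#     print(g.dfs(0))  # depth-first visit order, e.g. [0, 1, 2]
#     print(g.bfs(0))  # breadth-first visit order, e.g. [0, 1, 2]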
| 68 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(self, vocab_size=32000, hidden_size=2560, num_hidden_layers=32, num_attention_heads=32,
                 intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10000,
                 max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True,
                 bos_token_id=31996, eos_token_id=31999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
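# Minimal usage sketch (added; it assumes, as in the GPT-NeoX-Japanese modeling
# code, that the FFN width is derived as hidden_size * intermediate_multiple_size):
#     config = GPTNeoXJapaneseConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#     print(config.hidden_size * config.intermediate_multiple_size)  # 1024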
| 68 | 1 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
def __snake_case ( self : List[str]):
'''simple docstring'''
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_labels)
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def __snake_case ( self : Optional[int]):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase__ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __snake_case ( self : Optional[int] , lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : Dict):
'''simple docstring'''
lowerCAmelCase__ = ConvNextModel(config=lowercase__)
model.to(lowercase__)
model.eval()
lowerCAmelCase__ = model(lowercase__)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __snake_case ( self : Optional[int] , lowercase__ : Tuple , lowercase__ : str , lowercase__ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ = ConvNextForImageClassification(lowercase__)
model.to(lowercase__)
model.eval()
lowerCAmelCase__ = model(lowercase__ , labels=lowercase__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def __snake_case ( self : Optional[Any] , lowercase__ : int , lowercase__ : Tuple , lowercase__ : List[Any]):
'''simple docstring'''
lowerCAmelCase__ = ConvNextBackbone(config=lowercase__)
model.to(lowercase__)
model.eval()
lowerCAmelCase__ = model(lowercase__)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
lowerCAmelCase__ = None
lowerCAmelCase__ = ConvNextBackbone(config=lowercase__)
model.to(lowercase__)
model.eval()
lowerCAmelCase__ = model(lowercase__)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'feature-extraction': ConvNextModel, 'image-classification': ConvNextForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
def __snake_case ( self : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ = ConvNextModelTester(self)
lowerCAmelCase__ = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ , hidden_size=37)
def __snake_case ( self : Dict):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self : str):
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds')
def __snake_case ( self : Dict):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings')
def __snake_case ( self : int):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking')
def __snake_case ( self : Tuple):
'''simple docstring'''
pass
def __snake_case ( self : List[Any]):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(lowercase__)
lowerCAmelCase__ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase__)
def __snake_case ( self : List[Any]):
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__)
def __snake_case ( self : List[str]):
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase__)
def __snake_case ( self : Optional[int]):
'''simple docstring'''
def check_hidden_states_output(lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : List[str]):
lowerCAmelCase__ = model_class(lowercase__)
model.to(lowercase__)
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(lowercase__ , lowercase__))
lowerCAmelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase__ = self.model_tester.num_stages
self.assertEqual(len(lowercase__) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__)
def __snake_case ( self : Union[str, Any]):
'''simple docstring'''
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__)
@slow
def __snake_case ( self : Any):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = ConvNextModel.from_pretrained(lowercase__)
self.assertIsNotNone(lowercase__)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
@cached_property
def __snake_case ( self : str):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224') if is_vision_available() else None
@slow
def __snake_case ( self : int):
'''simple docstring'''
lowerCAmelCase__ = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224').to(lowercase__)
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = image_processor(images=lowercase__ , return_tensors='pt').to(lowercase__)
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**lowercase__)
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , lowercase__)
lowerCAmelCase__ = torch.tensor([-0.0_260, -0.4_739, 0.1_911]).to(lowercase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False
def __snake_case ( self : str):
'''simple docstring'''
lowerCAmelCase__ = ConvNextModelTester(self)
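# Shape note (added), following the tester defaults above (image_size=32,
# hidden_sizes=[10, 20, 30, 40]):
#     last_hidden_state: (batch, 40, 1, 1)    # spatial dims shrink 32x overall
#     stage2 feature map: (batch, 20, 4, 4)   # the stride-8 stage for a 32px input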
| 354 | import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
def __snake_case ( self : Tuple , lowercase__ : Any=None):
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires a calib_dataset.')
lowerCAmelCase__ = calib_dataset if calib_dataset is not None else self.calib_dataset
lowerCAmelCase__ = self._remove_unused_columns(lowercase__ , description='Calibration')
return DataLoader(
lowercase__ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowercase__ , )
def __snake_case ( self : List[Any] , lowercase__ : Union[str, Any]=None):
'''simple docstring'''
lowerCAmelCase__ = self.train_dataset if calib_dataset is None else calib_dataset
lowerCAmelCase__ = self.get_calib_dataloader(lowercase__)
lowerCAmelCase__ = self.model
quant_trainer.configure_model(lowercase__ , self.quant_trainer_args , calib=lowercase__)
model.eval()
quant_trainer.enable_calibration(lowercase__)
logger.info('***** Running calibration *****')
logger.info(F""" Num examples = {self.calib_num}""")
logger.info(F""" Batch size = {calib_dataloader.batch_size}""")
for step, inputs in enumerate(lowercase__):
# Prediction step
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = self.prediction_step(lowercase__ , lowercase__ , prediction_loss_only=lowercase__)
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowercase__ , self.quant_trainer_args)
lowerCAmelCase__ = model
def __snake_case ( self : Optional[Any] , lowercase__ : List[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : List[Any]=None , lowercase__ : str = "eval"):
'''simple docstring'''
lowerCAmelCase__ = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCAmelCase__ = self.get_eval_dataloader(lowercase__)
lowerCAmelCase__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase__ = self.compute_metrics
lowerCAmelCase__ = None
lowerCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ = eval_loop(
lowercase__ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , )
finally:
lowerCAmelCase__ = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
lowerCAmelCase__ = self.post_process_function(lowercase__ , lowercase__ , output.predictions)
lowerCAmelCase__ = self.compute_metrics(lowercase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
lowerCAmelCase__ = metrics.pop(lowercase__)
self.log(lowercase__)
else:
lowerCAmelCase__ = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report())
lowerCAmelCase__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase__)
return metrics
def __snake_case ( self : Optional[int] , lowercase__ : str , lowercase__ : Any , lowercase__ : List[str]=None , lowercase__ : str = "test"):
'''simple docstring'''
lowerCAmelCase__ = self.get_test_dataloader(lowercase__)
# Temporarily disable metric computation, we will do it in the loop here.
lowerCAmelCase__ = self.compute_metrics
lowerCAmelCase__ = None
lowerCAmelCase__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCAmelCase__ = eval_loop(
lowercase__ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , )
finally:
lowerCAmelCase__ = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCAmelCase__ = self.post_process_function(lowercase__ , lowercase__ , output.predictions , 'predict')
lowerCAmelCase__ = self.compute_metrics(lowercase__)
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys()):
if not key.startswith(F"""{metric_key_prefix}_"""):
lowerCAmelCase__ = metrics.pop(lowercase__)
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase__)
def __snake_case ( self : List[str] , lowercase__ : List[str]="./"):
'''simple docstring'''
lowerCAmelCase__ = self.eval_dataset
lowerCAmelCase__ = self.get_eval_dataloader(lowercase__)
lowerCAmelCase__ = next(iter(lowercase__))
# saving device - to make it consistent
lowerCAmelCase__ = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# convert to tuple
lowerCAmelCase__ = tuple(v.to(lowercase__) for k, v in batch.items())
logger.info('Converting model to be onnx compatible')
from pytorch_quantization.nn import TensorQuantizer
lowerCAmelCase__ = True
lowerCAmelCase__ = self.model.to(lowercase__)
model.eval()
model.float()
lowerCAmelCase__ = model.module if hasattr(lowercase__ , 'module') else model
quant_trainer.configure_model(lowercase__ , self.quant_trainer_args)
lowerCAmelCase__ = os.path.join(lowercase__ , 'model.onnx')
logger.info(F"""exporting model to {output_model_file}""")
lowerCAmelCase__ = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
lowercase__ , lowercase__ , lowercase__ , export_params=lowercase__ , opset_version=13 , do_constant_folding=lowercase__ , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=lowercase__ , )
logger.info('onnx export finished')
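# A minimal usage sketch (added). The method names are assumptions restored
# from the obfuscated bodies above (a calibration loop, evaluation with QA
# post-processing, and an ONNX export), not verified API:
#     trainer = QuestionAnsweringTrainer(model=model, args=training_args,
#                                        train_dataset=train_ds, eval_dataset=eval_ds,
#                                        eval_examples=eval_examples,
#                                        post_process_function=post_proc,
#                                        quant_trainer_args=quant_args)
#     trainer.calibrate()           # run post-training-quantization calibration
#     metrics = trainer.evaluate()  # QA metrics via the post-process function
#     trainer.save_onnx("./out")    # export the calibrated model to ONNX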
| 119 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot": [
"BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotConfig",
"BlenderbotOnnxConfig",
],
"tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ :Optional[int] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_blenderbot"] = [
"BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotForCausalLM",
"BlenderbotForConditionalGeneration",
"BlenderbotModel",
"BlenderbotPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_blenderbot"] = [
"TFBlenderbotForConditionalGeneration",
"TFBlenderbotModel",
"TFBlenderbotPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_blenderbot"] = [
"FlaxBlenderbotForConditionalGeneration",
"FlaxBlenderbotModel",
"FlaxBlenderbotPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
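# A minimal sketch (added) of the lazy-import idea used above -- not the
# transformers implementation, which also handles submodules and import errors:
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#
#         def __getattr__(self, attr):
#             # the real import only happens on first attribute access
#             module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(module, attr)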
| 101 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
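# Illustrative mapping (added; the layer and expert indices are hypothetical):
#     "decoder.layers.3.moe_layer.experts.0.fc1.weight"
#         -> "decoder.layers.3.ffn.experts.expert_7.fc1.weight"  (expert_idx=7)
#     "decoder.layers.3.moe_layer.gate.wg.weight"
#         -> "decoder.layers.3.ffn.router.classifier.weight"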
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
args = parser.parse_args()
metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 277 | 0 |
import os
from collections.abc import Iterator
def snake_case_(_UpperCamelCase = "." ) -> Iterator[str]:
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(_UpperCamelCase ):
_snake_case = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(_UpperCamelCase )[1] in (".py", ".ipynb"):
yield os.path.join(_UpperCamelCase , _UpperCamelCase ).lstrip('''./''' )
def snake_case_(_UpperCamelCase ) -> List[str]:
"""simple docstring"""
return F"""{i * ' '}*""" if i else "\n##"
def snake_case_(_UpperCamelCase , _UpperCamelCase ) -> str:
"""simple docstring"""
_snake_case = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(_UpperCamelCase ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(_UpperCamelCase )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def snake_case_(_UpperCamelCase = "." ) -> None:
"""simple docstring"""
_snake_case = ''''''
for filepath in sorted(good_file_paths(_UpperCamelCase ) ):
_snake_case, _snake_case = os.path.split(_UpperCamelCase )
if filepath != old_path:
_snake_case = print_path(_UpperCamelCase , _UpperCamelCase )
_snake_case = (filepath.count(os.sep ) + 1) if filepath else 0
_snake_case = F"""{filepath}/{filename}""".replace(''' ''' , '''%20''' )
_snake_case = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(F"""{md_prefix(_UpperCamelCase )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('''.''')
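# Example output sketch (added; file names hypothetical): running
# print_directory_md(".") over a tree containing sorts/bubble_sort.py prints
# markdown like:
#
#     ## Sorts
#       * [Bubble Sort](sorts/bubble_sort.py)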
| 278 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
def UpperCamelCase_ ( self : Union[str, Any] ) -> Dict:
super().setUp()
# fmt: off
_snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
_snake_case = dict(zip(A__ , range(len(A__ ) ) ) )
_snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
_snake_case = {'''unk_token''': '''<unk>'''}
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A__ ) )
def UpperCamelCase_ ( self : List[Any] , **A__ : int ) -> Union[str, Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase_ ( self : Any , **A__ : Tuple ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase_ ( self : Optional[Any] , A__ : str ) -> str:
_snake_case = '''lower newer'''
_snake_case = '''lower newer'''
return input_text, output_text
def UpperCamelCase_ ( self : Union[str, Any] ) -> Optional[int]:
_snake_case = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_snake_case = '''lower newer'''
_snake_case = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
_snake_case = tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
_snake_case = tokens + [tokenizer.unk_token]
_snake_case = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
@require_ftfy
def UpperCamelCase_ ( self : Any ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = self.tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
_snake_case = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
_snake_case = tokenizer_s.tokenize(A__ )
_snake_case = tokenizer_r.tokenize(A__ )
self.assertListEqual(A__ , A__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_snake_case = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
_snake_case = tokenizer_s.tokenize(A__ )
_snake_case = tokenizer_r.tokenize(A__ )
self.assertListEqual(A__ , A__ )
# Test that the tokenization is identical on unicode of space type
_snake_case = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_snake_case = tokenizer_s.tokenize(A__ )
_snake_case = tokenizer_r.tokenize(A__ )
self.assertListEqual(A__ , A__ )
# Test that the tokenization is identical on unicode of line break type
_snake_case = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_snake_case = tokenizer_s.tokenize(A__ )
_snake_case = tokenizer_r.tokenize(A__ )
self.assertListEqual(A__ , A__ )
def UpperCamelCase_ ( self : List[Any] ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_snake_case = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_snake_case = f"""{text_of_1_token} {text_of_1_token}"""
_snake_case = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , )
_snake_case = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ) + 1, len(A__ ) + 1 + len(A__ )) , )
_snake_case = f""" {text}"""
_snake_case = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , )
_snake_case = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A__ ) + 1, 1 + len(A__ ) + 1 + len(A__ )) , )
def UpperCamelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(A__ ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def UpperCamelCase_ ( self : Dict ) -> Union[str, Any]:
super().test_tokenization_python_rust_equals()
def UpperCamelCase_ ( self : str ) -> Optional[int]:
# CLIP always lower cases letters
pass
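# Note (added): in CLIP's BPE vocabulary the "</w>" suffix marks a piece that
# ends a word, so "lower" tokenizes as ["lo", "w", "er</w>"], while pieces
# without the suffix are word-internal.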
| 278 | 1 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """Return the numerator of the fraction immediately to the left of
    numerator/denominator among fractions with denominators up to limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
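# Worked check (added): with limit = 8 the fraction immediately left of 3/7 is
# 2/5, so solution(3, 7, 8) returns 2; for the full limit of 1,000,000 the
# result is the numerator of 428570/999997.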
| 310 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 42 | 0 |
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        """simple docstring"""
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        """simple docstring"""
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    """simple docstring"""
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    """simple docstring"""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
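# Quick illustration of the perplexity computation in `evaluate` above (a minimal
# sketch, independent of the script): perplexity is exp of the mean cross-entropy.
# >>> import torch
# >>> torch.exp(torch.tensor([2.0, 2.5, 3.0]).mean())
# tensor(12.1825)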
| 13 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """simple docstring"""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """simple docstring"""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
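# Why `is_pentagonal` works (a short derivation): the n-th pentagonal number is
# P(n) = n * (3n - 1) / 2, so solving 3n^2 - n - 2x = 0 for n gives
# n = (1 + sqrt(1 + 24x)) / 6; x is pentagonal exactly when that n is a positive
# integer. For example P(4) = 22, and (1 + (1 + 24 * 22) ** 0.5) / 6 == 4.0.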
| 13 | 1 |
ENERGY_CONVERSION: dict[str, float] = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.6_0217_6634E-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.35_5818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
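# Example of the two-step conversion above (value -> joules -> target unit),
# using the renamed `energy_conversion`:
# >>> energy_conversion("kilowatthour", "megajoule", 5)
# 18.0  # 5 * 3_600_000 J / 1_000_000 J per megajoule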
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 262 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name):
    """simple docstring"""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
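# A quick illustration of the renaming above on a made-up checkpoint key:
# >>> rename_keys("transformer.layers.0.norm1.weight")
# 'model.decoder.layers.0.self_attn_layer_norm.weight'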
def rename_state_dict(state_dict, hidden_size):
    """simple docstring"""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
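# Shape intuition for the qkv split above: fairseq stores one fused
# `in_proj_weight` of shape (3 * hidden_size, hidden_size); the slices
# [:h], [h:2h] and [-h:] recover the separate query, key and value projection
# matrices, each of shape (hidden_size, hidden_size).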
def decoder_config_from_checkpoint(checkpoint):
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def lowercase_ ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="cpu" ):
"""simple docstring"""
A_ : Any = MusicGen.get_pretrained(_UpperCAmelCase , device=_UpperCAmelCase )
A_ : str = decoder_config_from_checkpoint(_UpperCAmelCase )
A_ : Optional[int] = fairseq_model.lm.state_dict()
A_ , A_ : str = rename_state_dict(
_UpperCAmelCase , hidden_size=decoder_config.hidden_size )
A_ : List[str] = TaEncoderModel.from_pretrained('''t5-base''' )
A_ : Tuple = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
A_ : Union[str, Any] = MusicgenForCausalLM(_UpperCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
A_ , A_ : Tuple = decoder.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
A_ : Tuple = MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase , audio_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase )
# check we can do a forward pass
A_ : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
A_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
A_ : Tuple = model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
A_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
A_ : int = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
A_ : Optional[int] = MusicgenProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# set the appropriate bos/pad token ids
A_ : Tuple = 2048
A_ : Union[str, Any] = 2048
# set other default generation config params
A_ : Union[str, Any] = int(30 * audio_encoder.config.frame_rate )
A_ : List[str] = True
A_ : List[str] = 3.0
if pytorch_dump_folder is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
logger.info(f"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
if repo_id:
logger.info(f"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_UpperCAmelCase )
processor.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
    args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 167 | 0 |
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"],
        shuffle=False,
        collate_fn=collate_fn,
        batch_size=EVAL_BATCH_SIZE,
        drop_last=(accelerator.mixed_precision == "fp8"),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
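    # Typical invocations, given the argparse flags defined in `main` above
    # (assuming this file is saved as nlp_example.py):
    #   python nlp_example.py                          # fp32 on a single CPU/GPU
    #   python nlp_example.py --mixed_precision fp16
    #   python nlp_example.py --cpu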
| 364 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any=7 , UpperCamelCase__ : List[str]=3 , UpperCamelCase__ : List[Any]=1_8 , UpperCamelCase__ : List[Any]=3_0 , UpperCamelCase__ : List[str]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=[0.48145466, 0.4578275, 0.40821073] , UpperCamelCase__ : str=[0.26862954, 0.26130258, 0.27577711] , UpperCamelCase__ : List[str]=True , )-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Dict = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
__lowerCAmelCase: Union[str, Any] = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
__lowerCAmelCase: Optional[int] = parent
__lowerCAmelCase: List[str] = batch_size
__lowerCAmelCase: Union[str, Any] = num_channels
__lowerCAmelCase: Optional[Any] = image_size
__lowerCAmelCase: Tuple = min_resolution
__lowerCAmelCase: List[str] = max_resolution
__lowerCAmelCase: List[Any] = do_resize
__lowerCAmelCase: Union[str, Any] = size
__lowerCAmelCase: List[Any] = do_center_crop
__lowerCAmelCase: Optional[int] = crop_size
__lowerCAmelCase: Dict = do_normalize
__lowerCAmelCase: List[str] = image_mean
__lowerCAmelCase: Optional[int] = image_std
__lowerCAmelCase: str = do_convert_rgb
def lowercase_ ( self : Tuple)-> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def lowercase_ ( self : Any , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Dict=False)-> List[str]:
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__lowerCAmelCase: Union[str, Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1)) for x in image_inputs]
if torchify:
__lowerCAmelCase: str = [torch.from_numpy(UpperCamelCase__) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase_ ( self : Any)-> List[Any]:
'''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)
@property
def lowercase_ ( self : Any)-> Optional[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Union[str, Any])-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase__ , "size"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb"))
def lowercase_ ( self : List[Any])-> str:
'''simple docstring'''
__lowerCAmelCase: Tuple = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"height": 2_2_4, "width": 2_2_4})
self.assertEqual(image_processor.crop_size , {"height": 1_8, "width": 1_8})
__lowerCAmelCase: List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4)
self.assertEqual(image_processor.size , {"shortest_edge": 4_2})
self.assertEqual(image_processor.crop_size , {"height": 8_4, "width": 8_4})
def lowercase_ ( self : List[str])-> Optional[int]:
'''simple docstring'''
pass
def lowercase_ ( self : Any)-> Optional[int]:
'''simple docstring'''
__lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCAmelCase: Union[str, Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image)
# Test not batched input
__lowerCAmelCase: Optional[int] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: int = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self : int)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__lowerCAmelCase: List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray)
# Test not batched input
__lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Any = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowerCAmelCase: Dict = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
# Test not batched input
__lowerCAmelCase: Tuple = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None
def lowercase_ ( self : int)-> Dict:
'''simple docstring'''
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3
@property
def lowercase_ ( self : Union[str, Any])-> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : int)-> str:
'''simple docstring'''
__lowerCAmelCase: int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase__ , "size"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "center_crop"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase__ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase__ , "do_convert_rgb"))
def lowercase_ ( self : Tuple)-> Any:
'''simple docstring'''
pass
def lowercase_ ( self : Tuple)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCAmelCase: int = self.image_processor_tester.prepare_inputs(equal_resolution=UpperCamelCase__)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image)
# Test not batched input
__lowerCAmelCase: List[Any] = image_processing(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase: Optional[int] = image_processing(UpperCamelCase__ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 108 | 0 |
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE__ ( ) -> str:
'''simple docstring'''
lowerCAmelCase : List[Any] = cn.convert_to_negative(_UpperCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
'''simple docstring'''
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCAmelCase, 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : List[Any] = canny.gen_gaussian_kernel(9, sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase : Any = imread('digital_image_processing/image_data/lena_small.jpg', 0 )
# assert ambiguous array for all == True
assert canny_img.all()
lowerCAmelCase : Dict = canny.canny(_UpperCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE__ ( ) -> int:
'''simple docstring'''
assert gg.gaussian_filter(_UpperCAmelCase, 5, sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase : Any = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
lowerCAmelCase : List[Any] = conv.img_convolve(_UpperCAmelCase, _UpperCAmelCase ).astype(_UpperCAmelCase )
assert res.any()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
assert med.median_filter(_UpperCAmelCase, 3 ).any()
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase , lowerCAmelCase : Any = sob.sobel_filter(_UpperCAmelCase )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase : List[Any] = sp.make_sepia(_UpperCAmelCase, 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase : Optional[Any] = bs.Burkes(imread(_UpperCAmelCase, 1 ), 120 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = "digital_image_processing/image_data/lena_small.jpg", ) -> str:
'''simple docstring'''
lowerCAmelCase : int = rs.NearestNeighbour(imread(_UpperCAmelCase, 1 ), 400, 200 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase : Dict = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
lowerCAmelCase : Dict = imread(_UpperCAmelCase, 0 )
# Test for get_neighbors_pixel function() return not None
lowerCAmelCase : Any = 0
lowerCAmelCase : str = 0
lowerCAmelCase : List[Any] = image[x_coordinate][y_coordinate]
lowerCAmelCase : List[Any] = lbp.get_neighbors_pixel(
_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
lowerCAmelCase : str = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0, image.shape[0] ):
for j in range(0, image.shape[1] ):
lowerCAmelCase : Tuple = lbp.local_binary_value(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
assert lbp_image.any()
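# LBP in one line (a conceptual sketch): each pixel is compared with its 8
# neighbours; a neighbour >= the centre contributes a 1-bit, and the bits read
# in order form an 8-bit code, e.g. [1, 0, 1, 1, 0, 0, 1, 0] -> 0b10110010 -> 178.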
| 138 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__A : Optional[Any] = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]
def __init__( self : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str=False , UpperCAmelCase_ : bool = False , ):
lowerCAmelCase : List[Any] = hans_processors[task]()
lowerCAmelCase : Tuple = os.path.join(
UpperCAmelCase_ , 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train' , tokenizer.__class__.__name__ , str(UpperCAmelCase_ ) , UpperCAmelCase_ , ) , )
lowerCAmelCase : str = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase , lowerCAmelCase : List[Any] = label_list[2], label_list[1]
lowerCAmelCase : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCAmelCase : Any = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_ ):
if os.path.exists(UpperCAmelCase_ ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
lowerCAmelCase : int = torch.load(UpperCAmelCase_ )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
lowerCAmelCase : Optional[int] = (
processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ )
)
logger.info('Training examples: %s' , len(UpperCAmelCase_ ) )
lowerCAmelCase : List[str] = hans_convert_examples_to_features(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
logger.info('Saving features into cached file %s' , UpperCAmelCase_ )
torch.save(self.features , UpperCAmelCase_ )
def __len__( self : str ):
return len(self.features )
def __getitem__( self : Optional[Any] , UpperCAmelCase_ : List[str] ):
return self.features[i]
def lowercase__ ( self : int ):
return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]
def __init__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = 128 , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : bool = False , ):
lowerCAmelCase : List[Any] = hans_processors[task]()
lowerCAmelCase : List[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCAmelCase , lowerCAmelCase : int = label_list[2], label_list[1]
lowerCAmelCase : str = label_list
lowerCAmelCase : Union[str, Any] = processor.get_dev_examples(UpperCAmelCase_ ) if evaluate else processor.get_train_examples(UpperCAmelCase_ )
lowerCAmelCase : List[Any] = hans_convert_examples_to_features(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='convert examples to features' ):
if ex_index % 10000 == 0:
logger.info('Writing example %d of %d' % (ex_index, len(UpperCAmelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )
def lowercase__ ( self : Dict ):
return self.dataset
def __len__( self : Optional[int] ):
return len(self.features )
def __getitem__( self : int , UpperCAmelCase_ : List[Any] ):
return self.features[i]
def lowercase__ ( self : int ):
return self.label_list
class HansProcessor(DataProcessor):
def lowercase__ ( self : Dict , UpperCAmelCase_ : Dict ):
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ , 'heuristics_train_set.txt' ) ) , 'train' )
def lowercase__ ( self : Tuple , UpperCAmelCase_ : Any ):
return self._create_examples(self._read_tsv(os.path.join(UpperCAmelCase_ , 'heuristics_evaluation_set.txt' ) ) , 'dev' )
def lowercase__ ( self : Optional[Any] ):
return ["contradiction", "entailment", "neutral"]
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : List[str] = []
for i, line in enumerate(UpperCAmelCase_ ):
if i == 0:
continue
lowerCAmelCase : Union[str, Any] = '%s-%s' % (set_type, line[0])
lowerCAmelCase : Optional[int] = line[5]
lowerCAmelCase : Optional[int] = line[6]
lowerCAmelCase : Dict = line[7][2:] if line[7].startswith('ex' ) else line[7]
lowerCAmelCase : List[str] = line[0]
examples.append(InputExample(guid=UpperCAmelCase_ , text_a=UpperCAmelCase_ , text_b=UpperCAmelCase_ , label=UpperCAmelCase_ , pairID=UpperCAmelCase_ ) )
return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """simple docstring"""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 138 | 1 |
def prime_sieve_eratosthenes(num: int) -> list[int]:
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase : Optional[Any] = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
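    # Example output of the sieve above:
    # >>> prime_sieve_eratosthenes(30)
    # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]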
| 370 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
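    # Typical usage, given the arguments parsed above (paths are illustrative):
    #   python xla_spawn.py --num_cores 8 examples/run_glue.py --model_name_or_path bert-base-cased ...
    # The wrapped script must expose an `_mp_fn(index)` entry point for xmp.spawn.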
| 36 | 0 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
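    # e.g. sum_of_digits(262144) == 2 + 6 + 2 + 1 + 4 + 4 == 19, and all three
    # implementations above agree on it.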
| 50 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
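    # For reference, the exact GELU compared above is
    #     gelu(x) = 0.5 * x * (1 + erf(x / sqrt(2)))
    # while "gelu_new" is the tanh approximation
    #     0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x ** 3)))
    # which is why the first test expects allclose to fail between the two.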
| 153 | 0 |
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """simple docstring"""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
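# e.g. a data file "train.jsonl" resolves through _EXTENSION_TO_MODULE[".jsonl"]
# to ("json", {}), so it is read by the packaged json builder; "data.tsv" maps to
# ("csv", {"sep": "\t"}).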
| 364 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
@slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 7 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 |
"""simple docstring"""
import qiskit
def half_adder(bit0: int, bit1: int):
    """simple docstring"""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
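# Half adder truth table for reference (sum = XOR on qubit 2, carry = AND on
# qubit 3, read out as the bit string "carry sum"):
#   (0, 0) -> "00"    (0, 1) -> "01"    (1, 0) -> "01"    (1, 1) -> "10"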
if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
| 286 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """simple docstring"""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx) - 1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx) - 1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx) - 1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value matrix into separate key and value entries."""
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the COCO image the converted model will be verified on."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Copy/paste/tweak the original checkpoint weights into the HuggingFace GLPN structure."""
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        '--checkpoint_path',
        default=None,
        type=str,
        help='Path to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
    )
    parser.add_argument(
        '--model_name',
        default='glpn-kitti',
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 82 | 1 |
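A quick toy illustration of the index-shift renaming performed by rename_keys above: the original GLPN checkpoint numbers sub-modules from 1 (patch_embed1, block1, ...), while the HuggingFace model uses zero-based ModuleList indices. The key below is made up for the demo; the single-character index read is fine here because GLPN has fewer than ten stages.

key = 'glpn.encoder.block3.attn.q.weight'
idx = key[key.find('block') + len('block')]  # -> '3'
key = key.replace(f'block{idx}', f'block.{int(idx) - 1}')
print(key)  # glpn.encoder.block.2.attn.q.weight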
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    def get_masked_lm_array(name):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index, name):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index, name, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())
    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint.'
    )
    parser.add_argument(
        '--bert_config_file',
        type=str,
        required=True,
        help='The config json file corresponding to the BERT model. This specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path',
        type=str,
        required=True,
        help='Path to the output PyTorch model.',
    )
    args = parser.parse_args()
    convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 244 |
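One detail worth calling out in the conversion above: every TensorFlow "kernel" is transposed before being copied into PyTorch. TF dense layers store weights as (in_features, out_features), whereas torch.nn.Linear expects (out_features, in_features). A small standalone sketch with made-up shapes:

import numpy as np
import torch

tf_kernel = np.random.randn(768, 3072).astype(np.float32)  # (in, out), TF convention
pt_weight = torch.from_numpy(tf_kernel.transpose())  # (out, in), torch.nn.Linear convention
assert pt_weight.shape == (3072, 768)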
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    """Load a metaseq checkpoint and normalize its state dict to the HF OPT layout."""
    sd = torch.load(checkpoint_path, map_location='cpu')
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location='cpu')['model']

    # pop unnecessary weights
    keys_to_delete = [
        'decoder.version',
        'decoder.output_projection.weight',
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        'decoder.project_in_dim.weight': 'decoder.project_in.weight',
        'decoder.project_out_dim.weight': 'decoder.project_out.weight',
        'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace('.qkv_proj.', '.q_proj.')
            k_name = key.replace('.qkv_proj.', '.k_proj.')
            v_name = key.replace('.qkv_proj.', '.v_proj.')

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the fused weight as K, V, Q despite the `qkv` naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 99 | 0 |
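The qkv_proj handling in load_checkpoint above boils down to un-stacking a fused projection matrix. A toy sketch, with a made-up hidden size, of how torch.split recovers the three projections:

import torch

hidden = 4
fused = torch.randn(3 * hidden, hidden)  # K, V and Q stacked along dim 0
k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
assert q.shape == k.shape == v.shape == (hidden, hidden)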
# flake8: noqa
# Lint as: python3
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 356 |
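For context, the __all__ list restored above only controls star-imports. A toy module for illustration (names are hypothetical):

__all__ = ['public_fn']

def public_fn():
    return 'exported by `from module import *`'

def _other_fn():
    return 'skipped by star-import, but still importable explicitly'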
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)

        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
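The test above leans on runtime introspection. A minimal sketch of the same inspect.getmembers pattern, run against the standard library so it works without diffusers installed:

import inspect
import unittest

# Enumerate every class the module exposes, then filter on the defining module's name.
classes = inspect.getmembers(unittest, inspect.isclass)
case_classes = [name for name, cls in classes if 'case' in cls.__module__]
print(case_classes)  # e.g. ['FunctionTestCase', 'SkipTest', 'TestCase']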