code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=__snake_case ):
_lowerCAmelCase = ["keras_nlp"]
def __init__(self , *_lowercase , **_lowercase ):
'''simple docstring'''
requires_backends(self , ["""keras_nlp"""] )
| 63 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def __init__(self , _lowercase , _lowercase ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowercase , scheduler=_lowercase )
def __call__(self ):
'''simple docstring'''
__a : Dict = torch.randn(
(1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
__a : Optional[Any] = 1
__a : List[str] = self.unet(_lowercase , _lowercase ).sample
__a : Union[str, Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
__a : Optional[int] = scheduler_output - scheduler_output + torch.ones_like(_lowercase )
return result
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : int ):
if digit_amount > 0:
return round(number - int(_lowerCamelCase ) , _lowerCamelCase )
return number - int(_lowerCamelCase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 63 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = "vit_msn"
def __init__(self , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3072 , _lowercase="gelu" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.02 , _lowercase=1e-06 , _lowercase=224 , _lowercase=16 , _lowercase=3 , _lowercase=True , **_lowercase , ):
'''simple docstring'''
super().__init__(**_lowercase )
__a : int = hidden_size
__a : str = num_hidden_layers
__a : str = num_attention_heads
__a : Optional[Any] = intermediate_size
__a : Union[str, Any] = hidden_act
__a : Tuple = hidden_dropout_prob
__a : Any = attention_probs_dropout_prob
__a : List[Any] = initializer_range
__a : Any = layer_norm_eps
__a : Dict = image_size
__a : List[Any] = patch_size
__a : Dict = num_channels
__a : Optional[Any] = qkv_bias
| 63 | 1 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
lowercase__ = _symbol_database.Default()
lowercase__ = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ 
\x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 
\x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
lowercase__ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
lowercase__ = None
lowercase__ = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
lowercase__ = 45
lowercase__ = 1581
lowercase__ = 1517
lowercase__ = 1570
lowercase__ = 1584
lowercase__ = 1793
lowercase__ = 1795
lowercase__ = 1916
lowercase__ = 1864
lowercase__ = 1905
lowercase__ = 1919
lowercase__ = 2429
lowercase__ = 2208
lowercase__ = 2418
lowercase__ = 2323
lowercase__ = 2407
# @@protoc_insertion_point(module_scope)
| 63 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = DPRQuestionEncoderTokenizer
lowercase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowercase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ :
def __call__(self , _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , **_lowercase , ):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
__a : str = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
__a : str = titles if not isinstance(_lowercase , _lowercase ) else [titles]
__a : Optional[Any] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
__a : Tuple = len(_lowercase )
__a : Dict = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
assert len(_lowercase ) == len(
_lowercase ), F'''There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.'''
__a : Optional[Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
__a : str = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )["""input_ids"""]
__a : Union[str, Any] = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
__a : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__a : str = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase = 16 , _lowercase = 64 , _lowercase = 4 , ):
'''simple docstring'''
__a : Union[str, Any] = reader_input["""input_ids"""]
__a , __a , __a : Optional[int] = reader_output[:3]
__a : int = len(_lowercase )
__a : Any = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
__a : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
__a : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
__a : Dict = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
__a : int = sequence_ids.index(self.pad_token_id )
else:
__a : Optional[Any] = len(_lowercase )
__a : List[Any] = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , ):
'''simple docstring'''
__a : Tuple = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
__a : str = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
__a : Union[str, Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
__a : List[str] = end_index - start_index + 1
assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case ):
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
_lowerCAmelCase = ["input_ids", "attention_mask"]
_lowerCAmelCase = DPRReaderTokenizer
| 63 | 1 |
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 63 |
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[str] = len(grid[0] )
__a : int = len(_lowerCamelCase )
__a : Tuple = 0
__a : List[Any] = 0
__a : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
__a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__a : List[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__a : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__a : str = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
__a : Optional[Any] = max_product
return largest
def __magic_name__ ( ):
__a : Tuple = []
with open(os.path.dirname(_lowerCamelCase ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
__a : Tuple = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )]
return largest_product(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int ):
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
__a : str = number_of_bytes // partitions
__a : Optional[int] = []
for i in range(_lowerCamelCase ):
__a : int = i * bytes_per_partition + 1
__a : Union[str, Any] = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(F'''{start_bytes}-{end_bytes}''' )
return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
_lowerCAmelCase = 42
_lowerCAmelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE__ :
_lowerCAmelCase = 42
_lowerCAmelCase = 42
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase ):
'''simple docstring'''
__a : list[list[Edge]] = [[] for _ in range(_lowercase )]
__a : Dict = size
def __getitem__(self , _lowercase ):
'''simple docstring'''
return iter(self._graph[vertex] )
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self._size
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("""Edge weight must be either 0 or 1.""" )
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("""Vertex indexes must be in [0; size).""" )
self._graph[from_vertex].append(Edge(_lowercase , _lowercase ) )
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
__a : Optional[int] = deque([start_vertex] )
__a : list[int | None] = [None] * self.size
__a : Tuple = 0
while queue:
__a : Union[str, Any] = queue.popleft()
__a : Any = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
__a : Union[str, Any] = current_distance + edge.weight
__a : Dict = distances[edge.destination_vertex]
if (
isinstance(_lowercase , _lowercase )
and new_distance >= dest_vertex_distance
):
continue
__a : Optional[Any] = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex )
else:
queue.append(edge.destination_vertex )
if distances[finish_vertex] is None:
raise ValueError("""No path from start_vertex to finish_vertex.""" )
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
__a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : List[str] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
__a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
__a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
import torch
__a : Any = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__a : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def lowerCAmelCase__(self ):
    """Smoke test: the tiny TF checkpoint classifies one sentence into the
    expected dummy label/score.

    NOTE(review): restored coherent locals; the original referenced the
    undefined names `text_classifier` and `_lowercase` (NameError at runtime).
    """
    text_classifier = pipeline(
        task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
    outputs = text_classifier("""This is great !""" )
    self.assertEqual(nested_simplify(outputs ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def lowerCAmelCase__(self ):
    """Slow sanity check: the default PyTorch sentiment pipeline labels obvious
    positive/negative sentences with near-certain scores.

    NOTE(review): restored coherent locals; the original referenced the
    undefined names `text_classifier` and `_lowercase` (NameError at runtime).
    """
    text_classifier = pipeline("""text-classification""" )
    outputs = text_classifier("""This is great !""" )
    self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
    outputs = text_classifier("""This is bad !""" )
    self.assertEqual(nested_simplify(outputs ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
    outputs = text_classifier("""Birds are a type of animal""" )
    self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def lowerCAmelCase__(self ):
    """Slow sanity check: the default TensorFlow sentiment pipeline labels
    obvious positive/negative sentences with near-certain scores.

    NOTE(review): restored coherent locals; the original referenced the
    undefined names `text_classifier` and `_lowercase` (NameError at runtime).
    """
    text_classifier = pipeline("""text-classification""" , framework="""tf""" )
    outputs = text_classifier("""This is great !""" )
    self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
    outputs = text_classifier("""This is bad !""" )
    self.assertEqual(nested_simplify(outputs ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
    outputs = text_classifier("""Birds are a type of animal""" )
    self.assertEqual(nested_simplify(outputs ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
def lowerCAmelCase__(self , model , tokenizer , processor ):
    """Build the pipeline under test plus two short example inputs.

    NOTE(review): the original declared `_lowercase` three times — a
    SyntaxError (duplicate argument name) — and returned the undefined name
    `text_classifier`; restored distinct parameter names matching how the body
    uses them. `processor` is accepted for API symmetry but unused here.
    """
    text_classifier = TextClassificationPipeline(model=model , tokenizer=tokenizer )
    return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase__(self , text_classifier , _ ):
    """Shared pipeline-test body run against every small model/tokenizer pair.

    Exercises single input, batched input, `top_k=None` (all scores), the
    dict `text`/`text_pair` form, the disabled bare-pair form (must raise),
    and the legacy triple-nested pair form.

    NOTE(review): the original signature declared `_lowercase` twice — a
    SyntaxError (duplicate argument name) — and the body referenced undefined
    names (`text_classifier`, `_lowercase`, `model`, `N`). Locals were
    restored, and `model.config.idalabel` was restored to the standard
    transformers config attribute `id2label` (presumably mangled by the same
    rename) — confirm against the surrounding test suite.
    """
    model = text_classifier.model
    # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
    valid_inputs = """HuggingFace is in"""
    outputs = text_classifier(valid_inputs )
    self.assertEqual(nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] )
    self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
    valid_inputs = ["""HuggingFace is in """, """Paris is in France"""]
    outputs = text_classifier(valid_inputs )
    self.assertEqual(
        nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}, {"""label""": ANY(str ), """score""": ANY(float )}] , )
    self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
    self.assertTrue(outputs[1]["""label"""] in model.config.id2label.values() )
    # Forcing to get all results with `top_k=None`
    # This is NOT the legacy format
    outputs = text_classifier(valid_inputs , top_k=None )
    N = len(model.config.id2label.values() )
    self.assertEqual(
        nested_simplify(outputs ) , [[{"""label""": ANY(str ), """score""": ANY(float )}] * N, [{"""label""": ANY(str ), """score""": ANY(float )}] * N] , )
    valid_inputs = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
    outputs = text_classifier(valid_inputs )
    self.assertEqual(
        nested_simplify(outputs ) , {"""label""": ANY(str ), """score""": ANY(float )} , )
    self.assertTrue(outputs["""label"""] in model.config.id2label.values() )
    # This might be used a text pair, but tokenizer + pipe interaction
    # makes it hard to understand that it's not using the pair properly
    # https://github.com/huggingface/transformers/issues/17305
    # We disabled this usage instead as it was outputting wrong outputs.
    invalid_input = [["""HuggingFace is in """, """Paris is in France"""]]
    with self.assertRaises(ValueError ):
        text_classifier(invalid_input )
    # This used to be valid for doing text pairs
    # We're keeping it working because of backward compatibility
    outputs = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
    self.assertEqual(
        nested_simplify(outputs ) , [{"""label""": ANY(str ), """score""": ANY(float )}] , )
    self.assertTrue(outputs[0]["""label"""] in model.config.id2label.values() )
| 63 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowercase__ = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Fast CPU tests for VQDiffusionPipeline built from tiny dummy components.

    NOTE(review): in the original every member shared the single name
    `lowerCAmelCase__`, so later definitions shadowed earlier ones and the
    cross-references used below (`self.num_embed`, `self.dummy_vqvae`, ...)
    could never resolve; the bodies also used undefined `_lowercase`
    placeholders. Member names and placeholder arguments were restored to the
    values the surviving references imply — confirm against the upstream
    diffusers test module.
    """

    def tearDown(self ):
        """Free accumulated tensors/VRAM between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self ):
        # Size of the discrete VQ codebook shared by vqvae, transformer and scheduler.
        return 12

    @property
    def num_embeds_ada_norm(self ):
        return 12

    @property
    def text_embedder_hidden_size(self ):
        return 32

    @property
    def dummy_vqvae(self ):
        """Tiny seeded VQModel whose codebook size matches `num_embed`."""
        torch.manual_seed(0 )
        model = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def dummy_tokenizer(self ):
        tokenizer = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def dummy_text_encoder(self ):
        """Tiny seeded CLIP text encoder sized to `text_embedder_hidden_size`."""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(config )

    @property
    def dummy_transformer(self ):
        """Tiny seeded discrete-token transformer over a 12x12 latent grid."""
        torch.manual_seed(0 )
        height = 12
        width = 12
        model_kwargs = {
            """attention_bias""": True,
            """cross_attention_dim""": 32,
            """attention_head_dim""": height * width,
            """num_attention_heads""": 1,
            """num_vector_embeds""": self.num_embed,
            """num_embeds_ada_norm""": self.num_embeds_ada_norm,
            """norm_num_groups""": 32,
            """sample_size""": width,
            """activation_fn""": """geglu-approximate""",
        }
        model = TransformeraDModel(**model_kwargs )
        return model

    def test_vq_diffusion(self ):
        """End-to-end fast sampling without learned classifier-free embeddings;
        checks both the dataclass and the tuple return paths against a pinned slice."""
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        # Fixed (non-learnable) null embedding; the test below covers the learnable case.
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        # Re-seed so the tuple-return path samples identically.
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self ):
        """Same fast sampling but with a learnable null embedding sized to the
        text encoder; tolerances kept as in the original assertions."""
        device = """cpu"""
        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed )
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        pipe = VQDiffusionPipeline(
            vqvae=vqvae , text_encoder=text_encoder , tokenizer=tokenizer , transformer=transformer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        prompt = """teddy bear playing in the pool"""
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = pipe([prompt] , generator=generator , num_inference_steps=2 , output_type="""np""" )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = pipe(
            [prompt] , generator=generator , output_type="""np""" , return_dict=False , num_inference_steps=2 )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow GPU integration test for the released microsoft/vq-diffusion-ithq pipeline.

    NOTE(review): the original methods both carried the rename placeholder
    name and moved the pipeline to the undefined `_lowercase` device; restored
    `torch_device` (imported at module top) and a descriptive test name.
    Also note this class shadows the fast-test class above because both were
    renamed to the same identifier — confirm intended class names upstream.
    """

    def tearDown(self ):
        """Free accumulated tensors/VRAM between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
        pipeline = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        output = pipeline(
            """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=generator , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for the 0/1 knapsack solver ``knapsack.knapsack``.

    NOTE(review): the original defined three methods that all shared one name
    (so only the last was kept on the class and the first two never ran) and
    called ``k.knapsack`` with the undefined name ``_lowercase``; restored
    distinct test names and the locals the assertions actually use
    (capacity, weights, values, count).
    """

    def test_base_case(self ):
        """Zero capacity, or items that do not fit, yield profit 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )
        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

    def test_easy_case(self ):
        """Small instance: optimal value is 5 (items of value 2 and 3 fit)."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )

    def test_knapsack(self ):
        """Classic capacity-50 instance: optimal value is 220 (items 2 and 3)."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
# Allow running this test module directly: `python <this file>`.
if __name__ == "__main__":
    unittest.main()
| 63 | 1 |
"""simple docstring"""
from math import pi, sqrt
def __magic_name__ ( _lowerCamelCase : float ):
if num <= 0:
raise ValueError("""math domain error""" )
if num > 1_71.5:
raise OverflowError("""math range error""" )
elif num - int(_lowerCamelCase ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(_lowerCamelCase )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def __magic_name__ ( ):
    """Intended self-test for the gamma helper defined above.

    NOTE(review): broken as written — ``gamma`` is not a defined name in this
    module (the implementation above was renamed, and this very definition
    then shadows that renamed name at module level), and
    ``sqrt(_lowerCamelCase)`` references an undefined name where sqrt(pi) was
    presumably intended. The __main__ block never calls this function, so the
    breakage is latent; confirm intent before relying on it.
    """
    assert gamma(0.5 ) == sqrt(_lowerCamelCase )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): this interactive loop is broken by an inconsistent rename —
    # the value is assigned to `lowercase__` but tested and printed as `num`,
    # and `gamma` is not a defined name in this module (the implementation
    # above is named `__magic_name__`). Confirm the intended names before
    # executing this entry point.
    lowercase__ = 1.0
    while num:
        lowercase__ = float(input("Gamma of: "))
        print(f'gamma({num}) = {gamma(num)}')
        print("\nEnter 0 to exit...")
| 63 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Manim scene animating how checkpoint weights are staged across CPU,
    GPU and disk memory blocks.

    NOTE(review): this block is not runnable as written — the base class
    `__snake_case` and the many `_lowercase` arguments below (arrangement
    directions, colors, anchor mobjects for `arrange`/`next_to`/`Group`, the
    stroke color, ...) are undefined names left over from an automated
    rename, and every `VGroup(*_lowercase)` call references a lost local.
    Code is left byte-identical; comments only mark the visible stages.
    """

    def lowerCAmelCase__(self ):
        """Construct and play the full staging animation (single-scene entry point)."""
        # Building blocks: a memory cell, a smaller "meta" cell, and a fill overlay.
        __a : List[str] = Rectangle(height=0.5 , width=0.5 )
        __a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
        __a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU block: two columns of six cells plus a label.
        __a : Dict = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Union[str, Any] = Text("""CPU""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_lowercase )
        # GPU block: four cells plus a label.
        __a : Optional[Any] = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = Text("""GPU""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(_lowercase )
        # Model block: six cells plus a label.
        __a : List[Any] = [mem.copy() for i in range(6 )]
        __a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Optional[Any] = Text("""Model""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(_lowercase )
        # Place a small fill target next to each model cell, anchored on the CPU columns.
        __a : Tuple = []
        __a : Tuple = []
        __a : Optional[int] = []
        for i, rect in enumerate(_lowercase ):
            rect.set_stroke(_lowercase )
            __a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
            self.add(_lowercase )
            model_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase , *_lowercase )
        # Checkpoint block: six cells plus a label.
        __a : Optional[Any] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
        __a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(_lowercase )
        # Map each checkpoint cell onto a CPU cell (left column first, then right).
        __a : Dict = []
        __a : int = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
            target.move_to(_lowercase )
            ckpt_arr.append(_lowercase )
            __a : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase )
        # Legend in the top-left corner.
        __a : List[str] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a : List[Any] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_lowercase , _lowercase )
        __a : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_lowercase )
        # Narration for the disk-offload stage, then the disk block itself.
        __a : Optional[int] = MarkupText(
            F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        __a : List[Any] = [meta_mem.copy() for i in range(6 )]
        __a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Dict = Text("""Disk""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
        # Animate every checkpoint cell shrinking onto the disk column.
        __a : Optional[Any] = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
        self.play(*_lowercase )
        self.play(FadeOut(_lowercase ) )
        # Final narration: checkpoint is garbage-collected, then fade everything out.
        __a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_lowercase , run_time=3 ) )
        self.play(
            FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
        self.wait()
| 63 | 1 |
"""simple docstring"""
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
lowercase__ = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class SCREAMING_SNAKE_CASE__ :
    """Helper that builds a tiny AutoformerConfig plus synthetic model inputs
    for the test classes below.

    NOTE(review): this class is broken by an automated rename — ``__init__``
    declares ``_lowercase`` many times (duplicate argument names are a
    SyntaxError) while its body reads the original parameter names
    (``d_model``, ``parent``, ``batch_size``, ...), and every other method
    shares the single name ``lowerCAmelCase__`` so only the last definition
    survives on the class. Code is left byte-identical; comments record what
    each method visibly intends.
    """

    def __init__(self , _lowercase , _lowercase=16 , _lowercase=13 , _lowercase=7 , _lowercase=14 , _lowercase=10 , _lowercase=19 , _lowercase=5 , _lowercase=4 , _lowercase=True , _lowercase=16 , _lowercase=2 , _lowercase=4 , _lowercase=4 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=[1, 2, 3, 4, 5] , _lowercase=25 , _lowercase=5 , ):
        """Record the hyperparameters the helpers below read back.

        NOTE(review): the assignment order suggests the lost parameter names
        were (parent, d_model, batch_size, prediction_length, context_length,
        label_length, cardinality, embedding_dimension, num_time_features,
        is_training, hidden_size, num_hidden_layers, num_attention_heads,
        intermediate_size, hidden_act, dropout probs, lags_sequence,
        moving_average, autocorrelation_factor) — confirm upstream.
        """
        __a : int = d_model
        __a : Dict = parent
        __a : Tuple = batch_size
        __a : str = prediction_length
        __a : Optional[int] = context_length
        __a : Tuple = cardinality
        __a : Any = num_time_features
        __a : Union[str, Any] = lags_sequence
        __a : str = embedding_dimension
        __a : Union[str, Any] = is_training
        __a : Optional[Any] = hidden_size
        __a : List[Any] = num_hidden_layers
        __a : Dict = num_attention_heads
        __a : Union[str, Any] = intermediate_size
        __a : Any = hidden_act
        __a : Any = hidden_dropout_prob
        __a : Dict = attention_probs_dropout_prob
        # Derived sequence lengths used when shaping encoder/decoder inputs.
        __a : Any = context_length
        __a : int = prediction_length + label_length
        __a : Tuple = label_length
        __a : str = moving_average
        __a : Optional[int] = autocorrelation_factor

    def lowerCAmelCase__(self ):
        """Build the tiny AutoformerConfig from the stored hyperparameters."""
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def lowerCAmelCase__(self , _lowercase ):
        """Build the random past/future tensors an Autoformer forward pass needs."""
        # Past length covers the context window plus the largest lag offset.
        __a : Optional[int] = config.context_length + max(config.lags_sequence )
        __a : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        __a : List[Any] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        __a : Any = floats_tensor([self.batch_size, _past_length] )
        __a : List[str] = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        __a : Union[str, Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        __a : str = floats_tensor([self.batch_size, config.prediction_length] )
        __a : Dict = {
            """past_values""": past_values,
            """static_categorical_features""": static_categorical_features,
            """past_time_features""": past_time_features,
            """past_observed_mask""": past_observed_mask,
            """future_time_features""": future_time_features,
            """future_values""": future_values,
        }
        return inputs_dict

    def lowerCAmelCase__(self ):
        """Return (config, inputs_dict) for a fresh tiny model."""
        __a : int = self.get_config()
        __a : List[Any] = self.prepare_autoformer_inputs_dict(_lowercase )
        return config, inputs_dict

    def lowerCAmelCase__(self ):
        """Alias used by the common ModelTesterMixin entry points."""
        __a , __a : Dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        """Check that a standalone saved/reloaded encoder and decoder reproduce
        the full model's hidden states (seasonal/trend decomposition included).

        NOTE(review): like ``__init__`` this declares ``_lowercase`` twice
        (SyntaxError) and the body references names the rename destroyed
        (``config``, ``model``, ``transformer_inputs``, ...).
        """
        __a : Tuple = AutoformerModel(config=_lowercase ).to(_lowercase ).eval()
        __a : Tuple = model(**_lowercase )
        __a : str = outputs.encoder_last_hidden_state
        __a : Union[str, Any] = outputs.last_hidden_state
        # Round-trip the encoder through save_pretrained/from_pretrained.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a : List[str] = model.get_encoder()
            encoder.save_pretrained(_lowercase )
            __a : Union[str, Any] = AutoformerEncoder.from_pretrained(_lowercase ).to(_lowercase )
        __a , __a , __a , __a , __a : Optional[Any] = model.create_network_inputs(**_lowercase )
        __a , __a : int = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        __a : Dict = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        __a : List[str] = encoder(inputs_embeds=_lowercase )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        # Rebuild the decoder's seasonal/trend inputs over the label+prediction window.
        __a : Dict = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        __a : int = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        __a : Any = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        __a : List[str] = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        # Round-trip the decoder and compare its output against the full model's.
        with tempfile.TemporaryDirectory() as tmpdirname:
            __a : List[Any] = model.get_decoder()
            decoder.save_pretrained(_lowercase )
            __a : int = AutoformerDecoder.from_pretrained(_lowercase ).to(_lowercase )
        __a : int = decoder(
            trend=_lowercase , inputs_embeds=_lowercase , encoder_hidden_states=_lowercase , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
    """Common-suite tests for AutoformerModel / AutoformerForPrediction.

    NOTE(review): the two ``__snake_case`` bases are undefined names (the
    imports suggest they were ModelTesterMixin and PipelineTesterMixin), and
    every test method below shares the single name ``lowerCAmelCase__``, so
    only the last definition survives and the unittest runner never discovers
    the earlier ones. Code is left byte-identical; comments record visible
    intent only.
    """

    _lowerCAmelCase = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    _lowerCAmelCase = (AutoformerForPrediction,) if is_torch_available() else ()
    _lowerCAmelCase = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False

    def lowerCAmelCase__(self ):
        """Set up the model tester and config tester used by the common suite.

        NOTE(review): ``AutoformerModelTester`` is not a defined name in this
        module — the tester class above was renamed away from it.
        """
        __a : Dict = AutoformerModelTester(self )
        __a : Dict = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )

    def lowerCAmelCase__(self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase__(self ):
        """Every model class must save/reload with no missing keys."""
        __a , __a : List[str] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            __a : Optional[Any] = model_class(_lowercase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(_lowercase )
                __a , __a : str = model_class.from_pretrained(_lowercase , output_loading_info=_lowercase )
            self.assertEqual(info["""missing_keys"""] , [] )

    def lowerCAmelCase__(self ):
        """Delegate to the tester's standalone encoder/decoder equivalence check."""
        __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*_lowercase )

    @unittest.skip(reason="""Model has no tokens embeddings""" )
    def lowerCAmelCase__(self ):
        """Skipped: resize-embeddings test does not apply to Autoformer."""
        pass

    def lowerCAmelCase__(self ):
        """The main input name must be the first forward() argument after self."""
        __a : Tuple = inspect.signature(getattr(_lowercase , """forward""" ) )
        # The main input is the name of the argument after `self`
        __a : str = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , _lowercase )

    def lowerCAmelCase__(self ):
        """forward() must expose the documented argument names, in order."""
        __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Tuple = model_class(_lowercase )
            __a : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : List[Any] = [*signature.parameters.keys()]
            __a : Dict = [
                """past_values""",
                """past_time_features""",
                """past_observed_mask""",
                """static_categorical_features""",
                """static_real_features""",
                """future_values""",
                """future_time_features""",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("""future_observed_mask""" )
            expected_arg_names.extend(
                [
                    """decoder_attention_mask""",
                    """head_mask""",
                    """decoder_head_mask""",
                    """cross_attn_head_mask""",
                    """encoder_outputs""",
                    """past_key_values""",
                    """output_hidden_states""",
                    """output_attentions""",
                    """use_cache""",
                    """return_dict""",
                ] )
            self.assertListEqual(arg_names[: len(_lowercase )] , _lowercase )

    def lowerCAmelCase__(self ):
        """Attention tensors must have the expected count/shape for encoder,
        decoder and cross attention, in every output_attentions configuration."""
        __a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        __a : Optional[Any] = True
        __a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
        __a : Optional[int] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
        __a : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
        __a : List[str] = getattr(self.model_tester , """d_model""" , _lowercase )
        __a : Union[str, Any] = getattr(self.model_tester , """num_attention_heads""" , _lowercase )
        __a : List[Any] = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            __a : int = True
            __a : Union[str, Any] = False
            __a : List[Any] = True
            __a : int = model_class(_lowercase )
            model.to(_lowercase )
            model.eval()
            with torch.no_grad():
                __a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) )
            __a : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __a : Optional[int] = True
            __a : Optional[int] = model_class(_lowercase )
            model.to(_lowercase )
            model.eval()
            with torch.no_grad():
                __a : List[str] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
            __a : int = outputs.encoder_attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            # Expected number of output entries, adjusted for the optional fields.
            __a : Dict = len(_lowercase )
            __a : int = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(_lowercase , _lowercase )
            # decoder attentions
            __a : Dict = outputs.decoder_attentions
            self.assertIsInstance(_lowercase , (list, tuple) )
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            __a : Optional[Any] = outputs.cross_attentions
            self.assertIsInstance(_lowercase , (list, tuple) )
            self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
        # Check attention is always last and order is fine
        __a : Any = True
        __a : Any = True
        __a : List[Any] = model_class(_lowercase )
        model.to(_lowercase )
        model.eval()
        with torch.no_grad():
            __a : List[str] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
        self.assertEqual(out_len + 2 , len(_lowercase ) )
        __a : Optional[int] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(_lowercase ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def lowerCAmelCase__(self ):
        """Flaky wrapper around the common retain-grad hidden-states test."""
        super().test_retain_grad_hidden_states_attentions()
def __magic_name__ ( _lowerCamelCase : Tuple="train-batch.pt" ):
    """Download a pre-serialized tourism-monthly batch from the Hub and load it.

    Args:
        _lowerCamelCase: filename inside the dataset repo (default train batch).

    Returns:
        The deserialized batch of tensors mapped onto the test device.

    NOTE(review): the original discarded the downloaded local path, passing
    the *filename* to ``torch.load`` and reusing it as ``map_location``;
    restored loading the downloaded file and mapping onto ``torch_device``
    (imported at module top) — confirm against the upstream test.
    """
    file_path = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_lowerCamelCase , repo_type="""dataset""" )
    batch = torch.load(file_path , map_location=torch_device )
    return batch
@require_torch
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests for the released huggingface/autoformer-tourism-monthly checkpoint.

    NOTE(review): all three test methods share one name (only the last is
    discovered by the unittest runner), and the bodies reference names the
    automated rename destroyed (``model``, ``batch``, ``output``,
    ``outputs``, ``mean_prediction``, ``_lowercase``). Code left
    byte-identical; comments record visible intent only.
    """

    def lowerCAmelCase__(self ):
        """Seq2seq forward pass: output shape and a pinned 3x3 value slice."""
        __a : Dict = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowercase )
        __a : int = prepare_batch()
        with torch.no_grad():
            __a : List[Any] = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
        __a : Optional[int] = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , _lowercase )
        __a : Any = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=_lowercase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , _lowercase , atol=_lowercase ) )

    def lowerCAmelCase__(self ):
        """Encoder-only inference: last hidden state shape and pinned slice."""
        __a : Optional[int] = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowercase )
        __a : List[str] = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            __a : str = model(
                past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
        __a : Optional[int] = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , _lowercase )
        __a : str = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=_lowercase )
        self.assertTrue(torch.allclose(output[0, :3, :3] , _lowercase , atol=_lowercase ) )

    def lowerCAmelCase__(self ):
        """generate(): sampled sequence shape and mean prediction vs pinned values."""
        __a : Tuple = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(_lowercase )
        __a : Optional[Any] = prepare_batch("""val-batch.pt""" )
        with torch.no_grad():
            __a : Tuple = model.generate(
                static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
        __a : str = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , _lowercase )
        __a : List[Any] = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=_lowercase )
        __a : List[Any] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , _lowercase , rtol=1e-1 ) )
| 63 |
"""simple docstring"""
def __magic_name__(nfactor: int, moles: float, volume: float) -> int:
    """Convert molarity to normality: N = (moles / volume) * n-factor, rounded.

    The original signature had three parameters all named ``_lowerCamelCase``
    (a SyntaxError); names restored from the body (``int`` first -> nfactor).
    """
    return round(float(moles / volume) * nfactor)
def __magic_name__(volume: float, moles: float, temperature: float) -> int:
    """Ideal-gas pressure, rounded: P = nRT / V with R = 0.0821 L*atm/(mol*K).

    Duplicate parameter names (SyntaxError) restored from the body; order follows
    the upstream algorithm collection -- verify against callers.
    """
    return round(float((moles * 0.0821 * temperature) / (volume)))
def __magic_name__(pressure: float, moles: float, temperature: float) -> int:
    """Ideal-gas volume, rounded: V = nRT / P with R = 0.0821 L*atm/(mol*K).

    Duplicate parameter names (SyntaxError) restored from the body; order follows
    the upstream algorithm collection -- verify against callers.
    """
    return round(float((moles * 0.0821 * temperature) / (pressure)))
def __magic_name__(pressure: float, moles: float, volume: float) -> int:
    """Ideal-gas temperature, rounded: T = PV / (nR) with R = 0.0821 L*atm/(mol*K).

    Duplicate parameter names (SyntaxError) restored from the body; order follows
    the upstream algorithm collection -- verify against callers.
    """
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
    # Self-test hook: run any doctests defined in this module.
    import doctest
    doctest.testmod()
| 63 | 1 |
"""simple docstring"""
# Shell snippet injected at the top of auto-generated notebooks to install deps.
lowercase__ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# First notebook cell: the install snippet above.
# NOTE(review): INSTALL_CONTENT is not defined under that name here -- presumably
# the first assignment's original binding; confirm.
lowercase__ = [{"type": "code", "content": INSTALL_CONTENT}]
# Placeholders substituted into doc examples when rendering notebooks.
lowercase__ = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 63 |
"""simple docstring"""
def __magic_name__(nums: list[int]) -> float:
    """Return the mean absolute deviation of *nums*.

    Raises:
        ValueError: if *nums* is empty.
    """
    if not nums:  # makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
    # Self-test hook: run any doctests defined in this module.
    import doctest
    doctest.testmod()
| 63 | 1 |
"""simple docstring"""
# Uppercase alphabet used by the Vigenère cipher; the functions below index
# into it as LETTERS (the previous binding name left it undefined).
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
lowercase__ = LETTERS  # backward-compatible alias for the previous binding
def __magic_name__() -> None:
    """Interactive entry point: prompt for message, key and mode, then run the cipher."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    # NOTE(review): any other mode input leaves `translated` unbound (NameError),
    # matching the original control flow.
    print(F'''\n{mode.title()}ed message:''')
    print(translated)
def __magic_name__(key: str, message: str) -> str:
    """Encrypt *message* with *key* (thin wrapper over translate_message)."""
    return translate_message(key, message, "encrypt")
def __magic_name__(key: str, message: str) -> str:
    """Decrypt *message* with *key* (thin wrapper over translate_message)."""
    return translate_message(key, message, "decrypt")
def __magic_name__(key: str, message: str, mode: str) -> str:
    """Apply the Vigenère cipher to *message* with *key*.

    *mode* is "encrypt" or "decrypt". Case of letters is preserved; non-letters
    pass through unchanged and do not advance the key position.
    """
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)  # wrap around the alphabet
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            key_index += 1
            if key_index == len(key):  # restart the key from the beginning
                key_index = 0
        else:
            # Non-letter symbols are copied verbatim.
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file --
    # presumably the interactive entry point above; confirm.
    main()
| 63 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __magic_name__(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply the Gaussian pdf element-wise to *img* with the given variance.

    Duplicate parameter names (SyntaxError) restored from the body usage.
    """
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))  # normalisation constant
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def __magic_name__(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square window of side *kernel_size* centred at (x, y) in *img*."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def __magic_name__(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create the spatial Gaussian kernel of a given dimension.

    Each cell first holds its Euclidean distance from the kernel centre; the
    Gaussian is then applied element-wise (restores the lost ``arr[i, j] = ...``
    assignment target).
    """
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def __magic_name__(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Bilateral filter: edge-preserving smoothing combining a spatial Gaussian
    with an intensity Gaussian. Border pixels (within kernel_size // 2) are left 0.
    """
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the window centre.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)  # normalised weighted average
            imga[i, j] = val
    return imga
def __magic_name__(args: list) -> tuple:
    """Parse CLI args ``[prog, filename, spatial_var, intensity_var, kernel_size]``.

    Missing arguments fall back to defaults; an even kernel size is bumped up to
    the next odd number (the filter needs a centre pixel).
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force odd
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # CLI entry: bilateral_filter.py [image] [spatial_var] [intensity_var] [kernel_size]
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)  # read as grayscale
    cva.imshow("input image", img)
    out = img / 255  # normalise to [0, 1] before filtering
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)  # was np.uinta, which does not exist (digit-mangled uint8)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 1 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowercase__ = logging.get_logger(__name__)  # module logger (transformers' logging wrapper)
# model_type -> Flax class-name mappings. The _LazyAutoMapping calls below
# reference these constants by the FLAX_MODEL_*_MAPPING_NAMES names; the
# previous (obfuscated) bindings left every one of them undefined.
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
    [
        # Base model mapping
        ("albert", "FlaxAlbertModel"),
        ("bart", "FlaxBartModel"),
        ("beit", "FlaxBeitModel"),
        ("bert", "FlaxBertModel"),
        ("big_bird", "FlaxBigBirdModel"),
        ("blenderbot", "FlaxBlenderbotModel"),
        ("blenderbot-small", "FlaxBlenderbotSmallModel"),
        ("clip", "FlaxCLIPModel"),
        ("distilbert", "FlaxDistilBertModel"),
        ("electra", "FlaxElectraModel"),
        ("gpt-sw3", "FlaxGPT2Model"),
        ("gpt2", "FlaxGPT2Model"),
        ("gpt_neo", "FlaxGPTNeoModel"),
        ("gptj", "FlaxGPTJModel"),
        ("longt5", "FlaxLongT5Model"),
        ("marian", "FlaxMarianModel"),
        ("mbart", "FlaxMBartModel"),
        ("mt5", "FlaxMT5Model"),
        ("opt", "FlaxOPTModel"),
        ("pegasus", "FlaxPegasusModel"),
        ("regnet", "FlaxRegNetModel"),
        ("resnet", "FlaxResNetModel"),
        ("roberta", "FlaxRobertaModel"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
        ("roformer", "FlaxRoFormerModel"),
        ("t5", "FlaxT5Model"),
        ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
        ("vit", "FlaxViTModel"),
        ("wav2vec2", "FlaxWav2Vec2Model"),
        ("whisper", "FlaxWhisperModel"),
        ("xglm", "FlaxXGLMModel"),
        ("xlm-roberta", "FlaxXLMRobertaModel"),
    ]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
    [
        # Model for pre-training mapping
        ("albert", "FlaxAlbertForPreTraining"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForPreTraining"),
        ("big_bird", "FlaxBigBirdForPreTraining"),
        ("electra", "FlaxElectraForPreTraining"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("t5", "FlaxT5ForConditionalGeneration"),
        ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Masked LM mapping
        ("albert", "FlaxAlbertForMaskedLM"),
        ("bart", "FlaxBartForConditionalGeneration"),
        ("bert", "FlaxBertForMaskedLM"),
        ("big_bird", "FlaxBigBirdForMaskedLM"),
        ("distilbert", "FlaxDistilBertForMaskedLM"),
        ("electra", "FlaxElectraForMaskedLM"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("roberta", "FlaxRobertaForMaskedLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
        ("roformer", "FlaxRoFormerForMaskedLM"),
        ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
    ]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ("bart", "FlaxBartForConditionalGeneration"),
        ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
        ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
        ("encoder-decoder", "FlaxEncoderDecoderModel"),
        ("longt5", "FlaxLongT5ForConditionalGeneration"),
        ("marian", "FlaxMarianMTModel"),
        ("mbart", "FlaxMBartForConditionalGeneration"),
        ("mt5", "FlaxMT5ForConditionalGeneration"),
        ("pegasus", "FlaxPegasusForConditionalGeneration"),
        ("t5", "FlaxT5ForConditionalGeneration"),
    ]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classsification
        ("beit", "FlaxBeitForImageClassification"),
        ("regnet", "FlaxRegNetForImageClassification"),
        ("resnet", "FlaxResNetForImageClassification"),
        ("vit", "FlaxViTForImageClassification"),
    ]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
    ]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Causal LM mapping
        ("bart", "FlaxBartForCausalLM"),
        ("bert", "FlaxBertForCausalLM"),
        ("big_bird", "FlaxBigBirdForCausalLM"),
        ("electra", "FlaxElectraForCausalLM"),
        ("gpt-sw3", "FlaxGPT2LMHeadModel"),
        ("gpt2", "FlaxGPT2LMHeadModel"),
        ("gpt_neo", "FlaxGPTNeoForCausalLM"),
        ("gptj", "FlaxGPTJForCausalLM"),
        ("opt", "FlaxOPTForCausalLM"),
        ("roberta", "FlaxRobertaForCausalLM"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
        ("xglm", "FlaxXGLMForCausalLM"),
        ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
    ]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ("albert", "FlaxAlbertForSequenceClassification"),
        ("bart", "FlaxBartForSequenceClassification"),
        ("bert", "FlaxBertForSequenceClassification"),
        ("big_bird", "FlaxBigBirdForSequenceClassification"),
        ("distilbert", "FlaxDistilBertForSequenceClassification"),
        ("electra", "FlaxElectraForSequenceClassification"),
        ("mbart", "FlaxMBartForSequenceClassification"),
        ("roberta", "FlaxRobertaForSequenceClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
        ("roformer", "FlaxRoFormerForSequenceClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
    ]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
    [
        # Model for Question Answering mapping
        ("albert", "FlaxAlbertForQuestionAnswering"),
        ("bart", "FlaxBartForQuestionAnswering"),
        ("bert", "FlaxBertForQuestionAnswering"),
        ("big_bird", "FlaxBigBirdForQuestionAnswering"),
        ("distilbert", "FlaxDistilBertForQuestionAnswering"),
        ("electra", "FlaxElectraForQuestionAnswering"),
        ("mbart", "FlaxMBartForQuestionAnswering"),
        ("roberta", "FlaxRobertaForQuestionAnswering"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
        ("roformer", "FlaxRoFormerForQuestionAnswering"),
        ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
    ]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Token Classification mapping
        ("albert", "FlaxAlbertForTokenClassification"),
        ("bert", "FlaxBertForTokenClassification"),
        ("big_bird", "FlaxBigBirdForTokenClassification"),
        ("distilbert", "FlaxDistilBertForTokenClassification"),
        ("electra", "FlaxElectraForTokenClassification"),
        ("roberta", "FlaxRobertaForTokenClassification"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
        ("roformer", "FlaxRoFormerForTokenClassification"),
        ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
    ]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ("albert", "FlaxAlbertForMultipleChoice"),
        ("bert", "FlaxBertForMultipleChoice"),
        ("big_bird", "FlaxBigBirdForMultipleChoice"),
        ("distilbert", "FlaxDistilBertForMultipleChoice"),
        ("electra", "FlaxElectraForMultipleChoice"),
        ("roberta", "FlaxRobertaForMultipleChoice"),
        ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
        ("roformer", "FlaxRoFormerForMultipleChoice"),
        ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
    ]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
    [
        ("bert", "FlaxBertForNextSentencePrediction"),
    ]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
        ("whisper", "FlaxWhisperForConditionalGeneration"),
    ]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        ("whisper", "FlaxWhisperForAudioClassification"),
    ]
)
lowercase__ = FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES  # preserve last obfuscated binding
# Lazy config-class -> model-class mappings. The auto classes below reference
# these constants by the FLAX_MODEL_*_MAPPING names (restored here; the previous
# obfuscated bindings left them all undefined).
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
lowercase__ = FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING  # preserve last obfuscated binding
# Public Flax auto classes. The auto_class_update calls in the original source
# referenced these classes by their Flax* names, so the 13 colliding obfuscated
# class names are restored (digit-mangled "SeqaSeq"/"VisionaSeq" -> Seq2Seq /
# Vision2Seq). Each class dispatches through its `_model_mapping`.
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)

# Backward-compatible aliases for the previous (collided) obfuscated bindings.
SCREAMING_SNAKE_CASE__ = FlaxAutoModelForSpeechSeq2Seq
lowercase__ = FlaxAutoModelForSpeechSeq2Seq
| 63 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __magic_name__():
    """Build a tiny in-memory ``datasets.Dataset``: two near-duplicate files
    ("a " repeated 20 and 30 times) plus one distinct file."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class SCREAMING_SNAKE_CASE__(TestCase):
    """Unit tests for the MinHash-based near-deduplication helpers.

    Base class restored to the imported ``unittest.TestCase`` (the obfuscated
    base name was undefined).
    """

    def lowerCAmelCase__(self):
        """The two near-duplicate files form a single cluster of size 2."""
        dataset = get_dataset()
        duplicate_clusters = make_duplicate_clusters(dataset, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def lowerCAmelCase__(self):
        """Deduplication drops one duplicate and annotates copies/extremes."""
        dataset = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(dataset)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        # NOTE(review): the obfuscated source lost this expected value; True
        # matches the upstream test -- confirm.
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 63 | 1 |
"""simple docstring"""
def __magic_name__(n: int) -> int:
    """Return the *n*-th ugly number (positive integers whose only prime
    factors are 2, 3 and 5), via the classic three-pointer DP construction.

    For n <= 1 this returns 1 (the first ugly number). The obfuscated source
    had collapsed the three pointers/candidates into single reused names.
    """
    ugly_nums = [1]
    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        # Advance every pointer that produced next_num (avoids duplicates).
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
    # Self-test hook: run doctests, then print the 200th ugly number.
    from doctest import testmod
    testmod(verbose=True)
    # NOTE(review): `ugly_numbers` is not defined under that name in this file --
    # presumably the function above; confirm.
    print(f'{ugly_numbers(200) = }')
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazily-loaded public API of the Ernie sub-package: submodule -> exported names.
# The _LazyModule call at the bottom references `_import_structure`, which the
# obfuscated binding left undefined; restored here.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only exported when torch is installed.
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # NOTE(review): upstream assigns this to sys.modules[__name__]; the
    # obfuscated binding is kept here -- confirm intended target.
    lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
lowercase__ = logging.get_logger(__name__)  # module logger (transformers' logging wrapper)
@dataclass
class SCREAMING_SNAKE_CASE__(BenchmarkArguments):
    """TensorFlow benchmark arguments: TPU/GPU selection, eager vs. XLA execution.

    The obfuscation collapsed all field and method names, while the bodies still
    referenced the originals (self.tpu_name, self._setup_tpu, self.gpu_list, ...);
    those names are restored here so the class is internally consistent. The base
    class is the imported BenchmarkArguments (the obfuscated base was undefined).
    """

    # Old ``no_*`` flags, accepted for backward compatibility and translated to
    # their positive counterparts in __init__.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_X`` kwargs into ``X`` and pop TF-specific options."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}")
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None, metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0, metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        """Resolve a TPU cluster (by name if given); None when unavailable."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        """Build the tf.distribute strategy for TPU, single GPU, or CPU."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)
            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")
        return strategy

    @property
    def is_tpu(self):
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self):
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 63 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)  # module logger (library logging wrapper)
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Names of the supported learning-rate schedules.

    Member names restored from the dispatch table below, which references them
    as SchedulerType.LINEAR, SchedulerType.COSINE, etc.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


# The dispatch table and get_scheduler below reference this enum as SchedulerType.
SchedulerType = SCREAMING_SNAKE_CASE__
def __magic_name__(optimizer: Optimizer, last_epoch: int = -1):
    """Schedule with a constant learning rate (multiplier 1 at every step)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def __magic_name__(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant schedule preceded by a linear warmup over *num_warmup_steps* steps."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def __magic_name__(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant schedule.

    *step_rules* looks like ``"1:10,20:0.1,0.01"``: multiplier 10 before step 1,
    0.1 before step 20, then 0.01 for the rest of training.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])  # multiplier after the final boundary

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def __magic_name__(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by linear decay to zero at *num_training_steps*."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def __magic_name__(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup then cosine decay; num_cycles = 0.5 is a single half-cosine to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def __magic_name__(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup then *num_cycles* hard-restarted cosine decays to zero."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def __magic_name__(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    """Linear warmup then polynomial decay from the optimizer's initial lr to *lr_end*.

    Raises:
        ValueError: if *lr_end* is not smaller than the optimizer's initial lr.
    """
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        # (error text kept byte-identical to upstream)
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
# Maps each SchedulerType member to its factory; get_scheduler dispatches on
# this table as TYPE_TO_SCHEDULER_FUNCTION (the previous binding left that
# name undefined).
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
lowercase__ = TYPE_TO_SCHEDULER_FUNCTION  # backward-compatible alias for the previous binding
def __magic_name__(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build the scheduler named *name* with the matching arguments.

    Raises:
        ValueError: when the chosen scheduler needs ``num_warmup_steps`` or
            ``num_training_steps`` and they were not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
| 63 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# (parlai_substring, hf_substring) replacement pairs; the key-renaming function
# below iterates these as PATTERNS (the previous binding left it undefined).
PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
lowercase__ = PATTERNS  # backward-compatible alias for the previous binding
def __magic_name__ ( _lowerCamelCase : Union[str, Any] ):
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__a : Optional[int] = k.replace(_lowerCamelCase , _lowerCamelCase )
if k.startswith("""encoder""" ):
__a : List[str] = k.replace(""".attn""" , """.self_attn""" )
__a : Optional[Any] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__a : Dict = k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__a : List[str] = k.replace("""norm1""" , """self_attn_layer_norm""" )
__a : Optional[Any] = k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__a : Optional[Any] = k.replace("""norm3""" , """final_layer_norm""" )
return k
def rename_layernorm_keys(sd):
    """Rename the four embedding-LayerNorm entries of ``sd`` in place.

    ``layernorm_embedding`` becomes ``layer_norm`` for both encoder and
    decoder, weight and bias. Mutates ``sd`` and returns ``None``.

    Bug fix: the obfuscated version referenced undefined names (``keys``,
    ``sd``) and never wrote the renamed entry back into the dict.
    """
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = rename_layernorm_keys
lowercase__ = ["START"]
# Canonical alias: state-dict keys skipped entirely during conversion.
IGNORE_KEYS = lowercase__


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Convert a ParlAI Blenderbot checkpoint into an HF Blenderbot model.

    Loads the raw checkpoint, renames every weight with
    ``rename_state_dict_key``, loads the remapped weights into a freshly
    built ``BlenderbotForConditionalGeneration``, casts it to fp16 and
    saves it under ``pytorch_dump_folder_path``.

    Bug fixes vs. the obfuscated source: the three parameters all shared one
    name (a SyntaxError), the remapped weight was discarded instead of being
    stored in ``mapping``, and the helper was called with the wrong argument.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []  # (old, new) pairs whose renamed key is not in the HF model
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        # NOTE(review): the obfuscation lost this call's argument; the key
        # list inside rename_layernorm_keys ("model.encoder....") suggests it
        # operates on the full state dict — confirm against the upstream
        # conversion script before relying on 3B conversions.
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = convert_parlai_checkpoint
if __name__ == "__main__":
    # CLI entry point: parse flags and run the ParlAI -> HF conversion.
    # NOTE(review): the assignments below bind to `lowercase__` but the code
    # then reads `parser` / `args` — this looks like mechanical renaming
    # damage; restore the original variable names from the upstream script.
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    lowercase__ = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 63 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    """Load an OmegaConf configuration from ``config_path``.

    When ``display`` is true the resolved config is also pretty-printed as
    YAML. Returns the loaded ``OmegaConf`` object.

    (The obfuscated source duplicated the parameter name, a SyntaxError.)
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = load_config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    """Build a ``VQModel`` from a config and load checkpoint weights.

    Args:
        device: Target device the model is moved to.
        conf_path: YAML config path; defaults to the bundled vqgan config.
        ckpt_path: Checkpoint path; ``.ckpt`` files are treated as Lightning
            checkpoints (weights live under ``"state_dict"``).

    Returns:
        The loaded ``VQModel`` on ``device``.

    (The obfuscated source duplicated all three parameter names, a
    SyntaxError; names are restored from the body's references.)
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    # Load on CPU first, then move — avoids GPU OOM on large checkpoints.
    # NOTE(review): the original map_location argument was lost to the
    # obfuscation; confirm "cpu" against the upstream utility.
    sd = torch.load(ckpt_path, map_location="cpu")
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd  # free the state dict before returning
    return model


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = load_vqgan
def reconstruct_with_vqgan(x, model):
    """Encode ``x`` to the VQGAN latent space and decode it back.

    Prints the latent spatial shape, then returns the reconstruction.

    (The obfuscated source lost the encode/decode argument wiring; restored
    from the ``model.encode``/``model.decode`` call pattern.)
    """
    # encode() returns (latents, emb_loss, info); only the latents are used.
    z, _, _ = model.encode(x)
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''')
    xrec = model.decode(z)
    return xrec


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = reconstruct_with_vqgan
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like ``"pkg.mod.Class"`` to the named object.

    When ``reload`` is true the containing module is re-imported first.

    (The obfuscated source referenced the undefined name ``cls`` and passed
    the wrong ``package`` argument; both restored here.)
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    """Instantiate the object described by ``config``.

    ``config`` must contain a ``"target"`` dotted path; optional ``"params"``
    are forwarded as keyword arguments to the constructor.

    Raises:
        KeyError: If ``"target"`` is missing.
    """
    if "target" not in config:
        raise KeyError("""Expected key `target` to instantiate.""")
    return get_obj_from_str(config["""target"""])(**config.get("""params""", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    """Instantiate a model from ``config`` and optionally load weights.

    Args:
        config: Config dict understood by ``instantiate_from_config``.
        sd: State dict to load, or ``None`` to keep fresh weights.
        gpu: Move the model to CUDA when true.
        eval_mode: Put the model in eval mode when true.

    Returns:
        ``{"model": model}`` — a one-key dict, matching callers that index
        the result with ``["model"]``.

    (The obfuscated source duplicated parameter names, a SyntaxError.)
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = load_model_from_config
def load_model(config, ckpt, gpu, eval_mode):
    """Load a (Lightning-style) checkpoint and build its model.

    Args:
        config: Config object whose ``.model`` section describes the model.
        ckpt: Checkpoint path; falsy means "no weights".
        gpu: Forwarded to ``load_model_from_config``.
        eval_mode: Forwarded to ``load_model_from_config``.

    Returns:
        ``(model, global_step)``; ``global_step`` is ``None`` without a
        checkpoint.

    (The obfuscated source duplicated parameter names, a SyntaxError.)
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="""cpu""")
        global_step = pl_sd["""global_step"""]
        print(F'''loaded model from global step {global_step}.''')
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["""state_dict"""], gpu=gpu, eval_mode=eval_mode)["""model"""]
    return model, global_step


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = load_model
| 63 | 1 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines):
    """Hash a module's source lines, ignoring comments and blank lines.

    Strips ``#``-comments from each line, drops lines that become empty, and
    returns the hex SHA-256 digest of the remaining lines joined by newlines.
    Used to build cache keys for the packaged dataset builders.
    """
    # Local import: the top-level `from hashlib import shaaaa` in this file
    # is a corrupted spelling of sha256 and does not resolve.
    from hashlib import sha256

    filtered_lines = []
    for line in lines:
        line = re.sub(r"""#.*""", """""", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = """\n""".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("""utf-8""")
    return sha256(full_bytes).hexdigest()


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = _hash_python_lines
# get importable module names and hash for caching
lowercase__ = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
lowercase__ = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
# Image/audio folder modules claim every extension their loaders understand,
# registered in both lower- and upper-case form.
# NOTE(review): `_EXTENSION_TO_MODULE` / `_MODULE_TO_EXTENSIONS` are read
# below, but the dict literals above were rebound to `lowercase__` by the
# mechanical renaming — restore the original names from upstream `datasets`.
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
# Modules whose data files are auto-discovered from a directory layout.
lowercase__ = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
lowercase__ = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
# Folder-based modules additionally accept zipped archives.
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table: submodule name -> list of public symbols it provides.
# NOTE(review): upstream binds this (and the optional extensions below) to
# `_import_structure`, which is read again at the bottom; the `lowercase__`
# rebindings look like mechanical renaming damage.
lowercase__ = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
# Slow tokenizer is only available when sentencepiece is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = ["LlamaTokenizer"]
# Fast tokenizer requires the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = ["LlamaTokenizerFast"]
# Modeling classes require torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
# Static type checkers see the real imports; at runtime the module is
# replaced by a _LazyModule that imports submodules on first access.
if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
def _print_dist(dist, v):
    """Pretty-print a v×v distance matrix; unreachable pairs print as INF."""
    print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("""inf"""):
                print(int(dist[i][j]), end="""\t""")
            else:
                print("""INF""", end="""\t""")
        print()


def floyd_warshall(graph, v):
    """All-pairs shortest paths via Floyd–Warshall.

    Args:
        graph: v×v adjacency matrix; ``float("inf")`` marks missing edges.
        v: Number of vertices.

    Returns:
        ``(dist, v)`` where ``dist`` is the shortest-distance matrix.

    Bug fixes vs. the obfuscated source: duplicate parameter names (a
    SyntaxError) and the distance matrix / relaxation result being assigned
    to throwaway locals instead of ``dist``.
    """
    dist = [[float("""inf""") for _ in range(v)] for _ in range(v)]
    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("""inf""")
                    and dist[k][j] != float("""inf""")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist, v)
    return dist, v


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = floyd_warshall
if __name__ == "__main__":
    # Interactive driver: read vertex/edge counts and edge weights, then run
    # Floyd–Warshall (see the example transcript in the comments below).
    # NOTE(review): the assignments bind to `lowercase__` but the code reads
    # `v`, `e`, `graph`, `src`, `dst`, `weight` — mechanical renaming damage;
    # restore the original variable names from the upstream script.
    lowercase__ = int(input("Enter number of vertices: "))
    lowercase__ = int(input("Enter number of edges: "))
    lowercase__ = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        # Distance from a vertex to itself is zero.
        lowercase__ = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        lowercase__ = int(input("Enter source:"))
        lowercase__ = int(input("Enter destination:"))
        lowercase__ = float(input("Enter weight:"))
        lowercase__ = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 63 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
# Map of pretrained UniSpeech checkpoint names to their hosted config files.
lowercase__ = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration class for UniSpeech models.

    Stores the encoder hyper-parameters, the convolutional feature-extractor
    geometry, SpecAugment masking settings, the codevector quantizer used
    for pretraining, and CTC options. Defaults reproduce the original
    obfuscated signature's default values, whose ordering matches the
    upstream ``UniSpeechConfig``; the obfuscated ``__init__`` repeated one
    parameter name for every argument (a SyntaxError) and assigned to
    throwaway locals instead of ``self`` — both restored here.
    """

    _lowerCAmelCase = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        """Build the config; extra kwargs are forwarded to the base class."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        # The three conv_* lists describe the same stack of layers and must
        # therefore have identical lengths.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def lowerCAmelCase__(self):
        """Overall stride of the conv feature extractor (product of conv_stride)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 63 | 1 |
"""simple docstring"""
def solution():
    """Project Euler 19: count Sundays falling on the 1st of a month.

    Walks week by week from 1 Jan 1901 (a Tuesday; 6 Jan 1901 is the first
    Sunday) through 31 Dec 2000, handling leap years explicitly.

    Returns:
        The number of month-firsts that were Sundays in the 20th century
        (the known answer is 171).

    Bug fix vs. the obfuscated source: the counters were assigned to a
    throwaway local instead of ``day``/``month``/``sundays``.
    """
    days_per_month = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
    day = 6
    month = 1
    year = 1_9_0_1
    sundays = 0
    while year < 2_0_0_1:
        day += 7  # jump one week: every `day` visited is a Sunday
        if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
            # Leap year: February has 29 days.
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 2_9 and month == 2:
                month += 1
                day = day - 2_9
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 1_2:
            year += 1
            month = 1
        if year < 2_0_0_1 and day == 1:
            sundays += 1
    return sundays


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = solution


if __name__ == "__main__":
    print(solution())
| 63 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds EfficientFormer configs and dummy inputs for the TF tests.

    NOTE(review): the ``__init__`` signature repeats the parameter name
    ``_lowercase`` for every argument (a duplicate parameter name is a
    SyntaxError) and the body assigns to a throwaway ``__a`` while reading
    the original parameter names (``parent``, ``batch_size``, ...) — this is
    mechanical renaming damage; restore the real parameter names from the
    upstream ``test_modeling_tf_efficientformer.py``.
    """

    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        """Store every hyper-parameter later used to build configs/inputs."""
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        # seq_length is derived from embed_dim (+1 for the class token).
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio

    def lowerCAmelCase__(self ):
        """Build (config, pixel_values, labels) for one forward pass."""
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels

    def lowerCAmelCase__(self ):
        """Build an EfficientFormerConfig from the stored hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Run the bare model and check the hidden-state output shape."""
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Run the classification head on RGB and then greyscale inputs."""
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCAmelCase__(self ):
        """Return (config, inputs_dict) for the common test mixin."""
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
    """Generic model-test suite for the TF EfficientFormer family.

    NOTE(review): every test method below shares the name ``lowerCAmelCase__``
    (later definitions shadow earlier ones, so only the last would survive),
    several signatures repeat the parameter name ``_lowercase`` (a
    SyntaxError), and bodies read names (``inputs_dict``, ``model`` ...) that
    the obfuscation detached from their assignments. Restore from the
    upstream ``test_modeling_tf_efficientformer.py``.
    """

    # Tuple of model classes exercised by the common tests (TF only).
    _lowerCAmelCase = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task to model-class mapping for the pipeline mixin.
    _lowerCAmelCase = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    # Capability flags consumed by the common test mixin.
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False

    def lowerCAmelCase__(self ):
        """Create the model tester and the shared config tester."""
        __a : Tuple = TFEfficientFormerModelTester(self )
        __a : Any = ConfigTester(
            self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )

    def lowerCAmelCase__(self ):
        """Run the common configuration sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def lowerCAmelCase__(self ):
        """Skipped: the model has no inputs_embeds pathway."""
        pass

    @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def lowerCAmelCase__(self ):
        """Skipped: the model exposes no input/output embeddings."""
        pass

    def lowerCAmelCase__(self ):
        """The forward signature's first argument must be `pixel_values`."""
        __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = model_class(_lowercase )
            __a : Optional[Any] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : Optional[Any] = [*signature.parameters.keys()]
            __a : Union[str, Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowercase )

    def lowerCAmelCase__(self ):
        """Hidden-state count/shape checks (flag via kwargs and via config)."""
        def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
            __a : Tuple = model_class(_lowercase )
            __a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            # One hidden state per layer plus the initial embedding output.
            __a : str = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(_lowercase ) , _lowercase )
            if hasattr(self.model_tester , """encoder_seq_length""" ):
                __a : Any = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    __a : int = seq_length * self.model_tester.chunk_length
            else:
                __a : Any = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                __a : Optional[int] = outputs.decoder_hidden_states
                # NOTE(review): typo in the original — should be assertIsInstance.
                self.asseretIsInstance(_lowercase , (list, tuple) )
                self.assertEqual(len(_lowercase ) , _lowercase )
                __a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
                __a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )

        __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a : int = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
        """Adjust inputs per model class; the teacher variant takes no labels."""
        __a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def lowerCAmelCase__(self ):
        """Exercise the bare model end to end."""
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowercase )

    @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
    def lowerCAmelCase__(self ):
        """Skipped: masked image modeling is not implemented."""
        __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )

    def lowerCAmelCase__(self ):
        """Exercise the image-classification head."""
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowercase )

    @slow
    def lowerCAmelCase__(self ):
        """Load the first published checkpoint and assert it instantiates."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )

    def lowerCAmelCase__(self ):
        """Attention-output count and shape checks (kwargs and config paths)."""
        __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __a : int = True
        __a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
        __a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            __a : List[str] = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            __a : List[Any] = True
            __a : Tuple = False
            __a : List[Any] = True
            __a : int = model_class(_lowercase )
            __a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __a : Optional[Any] = True
            __a : List[str] = model_class(_lowercase )
            __a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )

    def lowerCAmelCase__(self ):
        """Build symbolic inputs with flexible dims and run the model functionally."""
        __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            __a : Dict = model_class(_lowercase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __a : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __a : Optional[Any] = model(_lowercase )
            self.assertTrue(outputs_dict is not None )
def prepare_img():
    """Load the COCO fixture image used by the slow integration tests.

    Bug fix vs. the obfuscated source: the opened image was bound to a
    throwaway local while the undefined name ``image`` was returned.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image


# Backwards-compatible alias for the obfuscated/legacy name.
__magic_name__ = prepare_img
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests against the real efficientformer-l1-300 weights.

    NOTE(review): the test bodies read ``self.default_image_processor`` but
    the cached property below was renamed to ``lowerCAmelCase__`` (and both
    test methods share that same name) — mechanical renaming damage; restore
    the real method names from the upstream test file.
    """

    @cached_property
    def lowerCAmelCase__(self ):
        """Image processor for the L1-300 checkpoint (None without vision deps)."""
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCAmelCase__(self ):
        """End-to-end logits check for the plain image-classification head."""
        __a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        __a : Optional[Any] = self.default_image_processor
        __a : List[str] = prepare_img()
        __a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : Optional[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : str = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )

    @slow
    def lowerCAmelCase__(self ):
        """End-to-end logits check for the distillation (teacher) head."""
        __a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        __a : Any = self.default_image_processor
        __a : str = prepare_img()
        __a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : List[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : int = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 63 | 1 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase__ = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
# Tiny wav2vec2 checkpoints exercised by the deepspeed tests.
lowercase__ = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}
lowercase__ = "zero2"
lowercase__ = "zero3"
# NOTE(review): `ZEROa` is undefined — before the mechanical renaming this
# was presumably `stages = [ZERO2, ZERO3]`; confirm against the upstream test.
lowercase__ = [ZEROa, ZEROa]
def __magic_name__(func, param_num, param):
    """Build a readable sub-test name that includes BOTH expanded params.

    ``parameterized`` by default shows only the first parameter in a
    sub-test name; this joins all of ``param.args`` instead. ``param_num``
    (the expansion index) is accepted for API compatibility but unused.

    Bug fix vs. the obfuscated source: the generator stringified a fixed
    variable instead of the loop variable ``x``, and the three parameters
    shared one name (a SyntaxError).
    """
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("""_""".join(str(x) for x in param.args))
    return F'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
# NOTE(review): `stages` and `models` are undefined — the constants above
# were rebound to `lowercase__` by the mechanical renaming; restore the
# original names from the upstream deepspeed test.
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """End-to-end DeepSpeed tests for the wav2vec2 ``run_asr.py`` research
    example: each test launches the script under the ``deepspeed`` launcher
    for one (ZeRO stage, model) pair, varying fp16 and multi-GPU.

    NOTE(review): mechanical renaming damage in this class:
    - the four test methods all share the name ``lowerCAmelCase__`` (later
      defs would shadow earlier ones);
    - several signatures repeat the parameter name ``_lowercase``, which is
      a SyntaxError in Python;
    - the base class ``__snake_case`` is undefined (presumably TestCasePlus);
    - bodies reference names (``models``, ``output_dir``, ``model_name``,
      ``eval_steps``, ``fpaa``, ``args``, ``stage``, ...) that the damaged
      signatures/assignments never bind.
    """

    # fp16, optionally distributed -- one sub-test per (stage, model).
    @parameterized.expand(_lowercase , name_func=_lowercase )
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''simple docstring'''
        self.run_and_check(
            stage=_lowercase , model=_lowercase , distributed=_lowercase , fpaa=_lowercase , )
    # fp16 on multiple GPUs.
    @require_torch_multi_gpu
    @parameterized.expand(_lowercase , name_func=_lowercase )
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''simple docstring'''
        self.run_and_check(
            stage=_lowercase , model=_lowercase , distributed=_lowercase , fpaa=_lowercase , )
    # fp32 variant.
    @parameterized.expand(_lowercase , name_func=_lowercase )
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''simple docstring'''
        self.run_and_check(
            stage=_lowercase , model=_lowercase , distributed=_lowercase , fpaa=_lowercase , )
    # fp32 on multiple GPUs.
    @require_torch_multi_gpu
    @parameterized.expand(_lowercase , name_func=_lowercase )
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''simple docstring'''
        self.run_and_check(
            stage=_lowercase , model=_lowercase , distributed=_lowercase , fpaa=_lowercase , )
    # Placeholder for post-run quality checks on the finished output dir.
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        pass
    # Resolve the model id, run the trainer once, then sanity-check the output.
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase = 10 , _lowercase = True , _lowercase = True , _lowercase = True , ):
        '''simple docstring'''
        __a : Dict = models[model]
        __a : List[str] = self.run_trainer(
            stage=_lowercase , model_name=_lowercase , eval_steps=_lowercase , num_train_epochs=1 , distributed=_lowercase , fpaa=_lowercase , )
        self.do_checks(_lowercase )
        return output_dir
    # Build the full CLI for run_asr.py and execute it under the launcher.
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase = 10 , _lowercase = 1 , _lowercase = True , _lowercase = True , ):
        '''simple docstring'''
        __a : int = self.get_auto_remove_tmp_dir("""./xxx""" , after=_lowercase )
        __a : List[Any] = F'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(_lowercase )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        '''.split()
        if fpaa:
            args.extend(["""--fp16"""] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        __a : List[Any] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        __a : Any = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        __a : Dict = self.get_launcher(_lowercase )
        __a : Tuple = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(_lowercase , env=self.get_env() )
        return output_dir
    # Deepspeed launcher prefix; 1 GPU unless a distributed run is requested.
    def lowerCAmelCase__(self , _lowercase=False ):
        '''simple docstring'''
        __a : Tuple = min(2 , get_gpu_count() ) if distributed else 1
        return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 63 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE__ ( TransformedDistribution ):
    """Distribution obtained from ``base_distribution`` via the affine map
    ``y = loc + scale * x``; ``event_dim`` marks how many trailing dimensions
    form a single event.

    Fixes from review: the base class was the undefined name ``__snake_case``
    (the ``super().__init__`` call and ``base_dist`` usage identify it as
    ``TransformedDistribution``), ``__init__`` repeated the parameter name
    ``_lowercase`` (a SyntaxError), and the three properties all shared one
    name, shadowing each other.
    """

    def __init__(self , base_distribution , loc=None , scale=None , event_dim=0 ):
        # Identity transform by default (scale=1, loc=0).
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )

    @property
    def mean(self ):
        """Mean of the transformed distribution: affine map of the base mean."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self ):
        """Variance scales with ``scale**2``; unaffected by ``loc``."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self ):
        """Standard deviation, derived from :attr:`variance`."""
        return self.variance.sqrt()
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Projects a feature vector onto the raw (unconstrained) parameters of a
    distribution: one ``nn.Linear(in_features, dim)`` head per entry of
    ``args_dim``, with ``domain_map`` mapping the raw outputs into each
    parameter's valid domain.

    Fixes from review: ``__init__`` repeated the parameter name ``_lowercase``
    (a SyntaxError), the projections/``domain_map`` were bound to locals
    instead of ``self.*`` (so the second method's ``self.proj`` /
    ``self.domain_map`` reads failed), and ``nn.Linear`` ignored the per-
    parameter ``dim``.
    """

    def __init__(self , in_features , args_dim , domain_map , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        # One linear head per distribution parameter, ordered like args_dim.
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map

    # NOTE(review): this plays the role of nn.Module.forward; name kept
    # as-is for interface stability.
    def lowerCAmelCase__(self , x ):
        """Apply every head to ``x`` and constrain results via ``domain_map``."""
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Wraps an arbitrary callable as an ``nn.Module`` so it can live inside
    module containers (e.g. a ``domain_map`` handed to a projection layer).

    Fixes from review: the callable was bound to a local instead of
    ``self.function`` (so the call method failed), and the call method's
    signature reused ``_lowercase`` for both the positional and the ``*args``
    parameter -- a SyntaxError.
    """

    def __init__(self , function ):
        super().__init__()
        self.function = function

    # NOTE(review): plays the role of nn.Module.forward; name kept as-is.
    def lowerCAmelCase__(self , x , *args ):
        """Call the wrapped function with ``x`` plus any extra positionals."""
        return self.function(x , *args )
class SCREAMING_SNAKE_CASE__ :
    """Base class describing how network outputs parameterize a
    ``torch.distributions.Distribution``.

    Concrete subclasses set ``distribution_class`` and ``args_dim`` and
    implement ``domain_map``. Fixes from review: the three class attributes
    all shared the name ``_lowerCAmelCase`` and every method shared the name
    ``lowerCAmelCase__`` (later bindings shadow earlier ones); method/attr
    names are restored from the intact internal references (``self.args_dim``,
    ``self.distribution_class``, ``self.domain_map``, ``cls.squareplus`` used
    by the subclasses below).
    """

    # Filled in by concrete subclasses.
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self , dim = 1 ):
        """``dim`` multiplies every entry of ``args_dim`` (multivariate case)."""
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self , distr_args ):
        """Instantiate the raw distribution from already-constrained args."""
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            # Fold the extra `dim` axis into one event of a multivariate output.
            return Independent(self.distribution_class(*distr_args ) , 1 )

    def distribution(self , distr_args , loc = None , scale = None , ):
        """Build the output distribution, optionally affinely rescaled by
        ``loc``/``scale`` (used to undo input scaling)."""
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )

    @property
    def event_shape(self ):
        """Shape of one sample: scalar event for ``dim == 1``."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self ):
        """Number of event dimensions."""
        return len(self.event_shape )

    @property
    def value_in_support(self ):
        """A value inside the distribution's support (used to pad masked steps)."""
        return 0.0

    def get_parameter_projection(self , in_features ):
        """Return the layer that maps features to raw distribution parameters."""
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def domain_map(self , *args ):
        """Map raw projections into each parameter's valid domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x ):
        """Smooth positivity map ``(x + sqrt(x^2 + 4)) / 2`` (softplus-like)."""
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
# NOTE(review): the base name `__snake_case` is undefined in this file;
# presumably it is the distribution-output base class defined above.
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Student's t output head: parameters (df, loc, scale).

    Fixes from review: both class attributes were bound to the same name
    ``_lowerCAmelCase`` (the base class reads ``args_dim`` and
    ``distribution_class``), and the classmethod repeated the parameter name
    ``_lowercase`` three times -- a SyntaxError. The method is named
    ``domain_map`` because the base's ``get_parameter_projection`` wraps
    ``self.domain_map``.
    """

    args_dim = {"df": 1, "loc": 1, "scale": 1}
    distribution_class = StudentT

    @classmethod
    def domain_map(cls , df , loc , scale ):
        """Constrain raw outputs: scale > 0 (squareplus, clamped away from
        zero) and df > 2 so the variance exists; loc is unconstrained."""
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
# NOTE(review): the base name `__snake_case` is undefined in this file;
# presumably it is the distribution-output base class defined above.
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Normal output head: parameters (loc, scale).

    Fixes from review: both class attributes shared the name
    ``_lowerCAmelCase`` and the classmethod repeated the parameter name
    ``_lowercase`` -- a SyntaxError.
    """

    args_dim = {"loc": 1, "scale": 1}
    distribution_class = Normal

    @classmethod
    def domain_map(cls , loc , scale ):
        """Constrain scale to be strictly positive; loc is unconstrained."""
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
# NOTE(review): the base name `__snake_case` is undefined in this file;
# presumably it is the distribution-output base class defined above, whose
# `_base_distribution` / `distribution` hooks are overridden here.
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Negative-binomial output head: parameters (total_count, logits).

    Fixes from review: both class attributes shared the name
    ``_lowerCAmelCase``, and both instance methods repeated the parameter
    name ``_lowercase`` -- a SyntaxError.
    """

    args_dim = {"total_count": 1, "logits": 1}
    distribution_class = NegativeBinomial

    @classmethod
    def domain_map(cls , total_count , logits ):
        """``total_count`` must be positive; ``logits`` are unconstrained."""
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def _base_distribution(self , distr_args ):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )

    def distribution(self , distr_args , loc = None , scale = None ):
        """For a count distribution, fold ``scale`` into the logits instead of
        affinely transforming samples (``loc`` is intentionally ignored)."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 63 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module-level logger for this pipeline file.
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any]=8 ):
__a : List[str] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
__a : Any = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def __magic_name__ ( pil_image , w=512 , h=512 ):
    """Convert a PIL image into a float tensor of shape (1, 3, h, w) with
    values normalized to [-1, 1].

    Fixes from review: the signature repeated ``_lowerCamelCase`` (a
    SyntaxError), the result of ``resize`` was discarded, and the dtype was
    the nonexistent ``np.floataa`` (restored to ``np.float32``).
    """
    pil_image = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
    arr = np.array(pil_image.convert("""RGB""" ) )
    # Map uint8 [0, 255] to float32 [-1, 1].
    arr = arr.astype(np.float32 ) / 127.5 - 1
    # HWC -> CHW, then add the batch dimension.
    arr = np.transpose(arr , [2, 0, 1] )
    image = torch.from_numpy(arr ).unsqueeze(0 )
    return image
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Kandinsky 2.2 image-to-image diffusion pipeline (unet + scheduler +
    movq VAE): noises an encoded init image to a strength-dependent timestep,
    then denoises it conditioned on CLIP image embeddings.

    NOTE(review): mechanical renaming damage throughout -- many signatures
    repeat the parameter name ``_lowercase`` (a SyntaxError), results are
    bound to dead locals (``__a``) instead of ``self.*`` attributes or the
    names the following lines read, the base class ``__snake_case`` is
    undefined (presumably DiffusionPipeline), and all helper methods share
    the name ``lowerCAmelCase__``.
    """

    # Registers unet/scheduler/movq and derives the movq scale factor.
    def __init__(self , _lowercase , _lowercase , _lowercase , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
        __a : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    # Truncate the scheduler's timesteps according to `strength`.
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''simple docstring'''
        __a : Optional[Any] = min(int(num_inference_steps * strength ) , _lowercase )
        __a : str = max(num_inference_steps - init_timestep , 0 )
        __a : List[Any] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start
    # Encode the init image with movq (unless already latent) and add noise.
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ):
        '''simple docstring'''
        if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}''' )
        __a : Optional[Any] = image.to(device=_lowercase , dtype=_lowercase )
        __a : Optional[int] = batch_size * num_images_per_prompt
        if image.shape[1] == 4:
            # Already a 4-channel latent; skip VAE encoding.
            __a : int = image
        else:
            if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
                raise ValueError(
                    F'''You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch'''
                    F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
            elif isinstance(_lowercase , _lowercase ):
                # Per-sample generators: encode each slice with its own seed.
                __a : int = [
                    self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
                ]
                __a : Optional[Any] = torch.cat(_lowercase , dim=0 )
            else:
                __a : Optional[Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
            __a : Union[str, Any] = self.movq.config.scaling_factor * init_latents
            __a : Dict = torch.cat([init_latents] , dim=0 )
        __a : Dict = init_latents.shape
        __a : Optional[int] = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
        # get latents
        __a : Any = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
        __a : Any = init_latents
        return latents
    # Offload unet/movq to CPU between calls (accelerate's cpu_offload).
    def lowerCAmelCase__(self , _lowercase=0 ):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        __a : Any = torch.device(F'''cuda:{gpu_id}''' )
        __a : List[Any] = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(_lowercase , _lowercase )
    # Hook-based model offload (accelerate >= 0.17); keeps the last hook.
    def lowerCAmelCase__(self , _lowercase=0 ):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        __a : List[Any] = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=_lowercase )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        __a : Tuple = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            __a , __a : Optional[int] = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
        # We'll offload the last model manually.
        __a : Tuple = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Resolve the device accelerate hooks will actually execute on.
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_lowercase , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    # Main entry point: img2img generation from image embeddings.
    @torch.no_grad()
    @replace_example_docstring(_lowercase )
    def __call__(self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ):
        '''simple docstring'''
        __a : List[Any] = self._execution_device
        __a : List[str] = guidance_scale > 1.0
        if isinstance(_lowercase , _lowercase ):
            __a : Tuple = torch.cat(_lowercase , dim=0 )
        __a : Union[str, Any] = image_embeds.shape[0]
        if isinstance(_lowercase , _lowercase ):
            __a : Optional[int] = torch.cat(_lowercase , dim=0 )
        if do_classifier_free_guidance:
            # Duplicate embeddings and stack [negative, positive] for CFG.
            __a : int = image_embeds.repeat_interleave(_lowercase , dim=0 )
            __a : Union[str, Any] = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
            __a : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
        if not isinstance(_lowercase , _lowercase ):
            __a : List[str] = [image]
        if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
            raise ValueError(
                F'''Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support  PIL image and pytorch tensor''' )
        __a : str = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
        __a : Optional[int] = image.to(dtype=image_embeds.dtype , device=_lowercase )
        __a : Optional[Any] = self.movq.encode(_lowercase )["""latents"""]
        __a : List[Any] = latents.repeat_interleave(_lowercase , dim=0 )
        self.scheduler.set_timesteps(_lowercase , device=_lowercase )
        __a , __a : Optional[int] = self.get_timesteps(_lowercase , _lowercase , _lowercase )
        __a : Optional[int] = timesteps[:1].repeat(batch_size * num_images_per_prompt )
        __a , __a : int = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
        __a : List[str] = self.prepare_latents(
            _lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
        for i, t in enumerate(self.progress_bar(_lowercase ) ):
            # expand the latents if we are doing classifier free guidance
            __a : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __a : Tuple = {"""image_embeds""": image_embeds}
            __a : str = self.unet(
                sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
            if do_classifier_free_guidance:
                # Split noise/variance halves and apply the guidance formula.
                __a , __a : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
                __a , __a : int = noise_pred.chunk(2 )
                __a , __a : int = variance_pred.chunk(2 )
                __a : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                __a : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                __a , __a : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            __a : Optional[int] = self.scheduler.step(
                _lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
        # post-processing
        __a : int = self.movq.decode(_lowercase , force_not_quantize=_lowercase )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            # Map [-1, 1] back to [0, 1] and move channels last for numpy/PIL.
            __a : List[Any] = image * 0.5 + 0.5
            __a : Union[str, Any] = image.clamp(0 , 1 )
            __a : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            __a : Optional[int] = self.numpy_to_pil(_lowercase )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_lowercase )
| 63 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
# Make torch/cudnn fully deterministic so slice comparisons are reproducible.
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Fast tests for KandinskyV22PriorPipeline built from tiny random
    components (prior transformer, CLIP text/vision encoders, tokenizer,
    image processor, UnCLIP scheduler).

    NOTE(review): mechanical renaming damage -- all fixture properties and
    test methods share the name ``lowerCAmelCase__`` (later defs shadow
    earlier ones, so e.g. ``self.dummy_prior`` / ``self.text_embedder_hidden_size``
    reads have no matching definitions), at least one signature repeats the
    parameter ``_lowercase`` (a SyntaxError), and the mixin base
    ``__snake_case`` is undefined (presumably PipelineTesterMixin).
    """

    _lowerCAmelCase = KandinskyVaaPriorPipeline
    _lowerCAmelCase = ["prompt"]
    _lowerCAmelCase = ["prompt", "negative_prompt"]
    _lowerCAmelCase = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    _lowerCAmelCase = False
    # Fixture dims: embedding size, time input dim, and derived sizes.
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return 32
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return 32
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return 100
    # Tiny random CLIP tokenizer.
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
    # Tiny CLIP text encoder with a fixed seed for reproducibility.
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_lowercase )
    # Tiny prior transformer; clip_std forced to ones so post-processing
    # does not collapse to zero.
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : Dict = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        __a : Tuple = PriorTransformer(**_lowercase )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __a : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
    # Tiny CLIP vision encoder.
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : List[str] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __a : Optional[Any] = CLIPVisionModelWithProjection(_lowercase )
        return model
    # Matching CLIP image processor.
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Optional[Any] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_lowercase , do_normalize=_lowercase , do_resize=_lowercase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
        return image_processor
    # Assemble all dummy components for the pipeline under test.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Union[str, Any] = self.dummy_prior
        __a : int = self.dummy_image_encoder
        __a : Any = self.dummy_text_encoder
        __a : int = self.dummy_tokenizer
        __a : Optional[Any] = self.dummy_image_processor
        __a : List[Any] = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowercase , clip_sample_range=10.0 , )
        __a : List[Any] = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components
    # Standard seeded inputs for a short 2-step run.
    def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
        '''simple docstring'''
        if str(_lowercase ).startswith("""mps""" ):
            __a : Dict = torch.manual_seed(_lowercase )
        else:
            __a : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __a : Union[str, Any] = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    # Smoke test: run the pipeline on CPU and compare an embedding slice
    # against the recorded expected values.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Union[str, Any] = """cpu"""
        __a : Union[str, Any] = self.get_dummy_components()
        __a : Dict = self.pipeline_class(**_lowercase )
        __a : Tuple = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __a : Optional[int] = pipe(**self.get_dummy_inputs(_lowercase ) )
        __a : str = output.image_embeds
        __a : Any = pipe(
            **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
        __a : List[Any] = image[0, -10:]
        __a : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __a : Optional[Any] = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    # Batch-consistency check (skipped on MPS).
    @skip_mps
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Any = torch_device == """cpu"""
        __a : Any = True
        __a : Any = False
        self._test_inference_batch_single_identical(
            test_max_difference=_lowercase , relax_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
    # Attention-slicing equivalence check (skipped on MPS).
    @skip_mps
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Optional[int] = torch_device == """cpu"""
        __a : Union[str, Any] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
| 63 | 1 |
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
    """Max-Fenwick-tree-like structure supporting point update and
    range-max query over ``size`` elements.

    NOTE(review): mechanical renaming damage -- ``__init__`` binds its arrays
    to dead locals (``__a``) instead of ``self.size`` / ``self.arr`` /
    ``self.tree`` (which the other methods read), both static helpers share
    the name ``lowerCAmelCase__`` while the bodies call ``self.get_next`` /
    ``self.get_prev`` (their evident original names), signatures repeat the
    parameter ``_lowercase`` (a SyntaxError), and bodies reference ``size``,
    ``index``, ``value``, ``left``, ``right`` that the damaged parameters
    never bind.
    """

    def __init__(self , _lowercase ):
        '''simple docstring'''
        # NOTE(review): these should be attribute assignments on `self`.
        __a : Union[str, Any] = size
        __a : Dict = [0] * size
        __a : Tuple = [0] * size
    # Next index whose tree node covers this one (set the lowest unset bit).
    @staticmethod
    def lowerCAmelCase__(_lowercase ):
        '''simple docstring'''
        return index | (index + 1)
    # One before the left border of the range covered by `index`.
    @staticmethod
    def lowerCAmelCase__(_lowercase ):
        '''simple docstring'''
        return (index & (index + 1)) - 1
    # Point assignment arr[index] = value, then repair covering tree nodes.
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''simple docstring'''
        __a : Union[str, Any] = value
        while index < self.size:
            __a : str = self.get_prev(_lowercase ) + 1
            if current_left_border == index:
                __a : Union[str, Any] = value
            else:
                __a : str = max(_lowercase , _lowercase , _lowercase )
            __a : Optional[int] = self.get_next(_lowercase )
    # Max over arr[left:right) by walking right-to-left over tree blocks.
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''simple docstring'''
        right -= 1 # Because of right is exclusive
        __a : int = 0
        while left <= right:
            __a : int = self.get_prev(_lowercase )
            if left <= current_left:
                # Whole tree block inside the range: take its cached max.
                __a : Optional[Any] = max(_lowercase , self.tree[right] )
                __a : List[str] = current_left
            else:
                # Partial block: fall back to the raw array element.
                __a : List[str] = max(_lowercase , self.arr[right] )
                right -= 1
        return result
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 63 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Tokenizer tests for LED (slow + fast) built on a tiny hand-written
    BPE vocab/merges written to a temp dir in setUp.

    NOTE(review): mechanical renaming damage -- most helper/test methods
    share the name ``lowerCAmelCase__`` (later defs shadow earlier ones),
    ``setUp`` writes ``self.vocab_file`` / ``self.merges_file`` that were
    never assigned (locals ``__a`` instead), and the mixin base
    ``__snake_case`` is undefined (presumably TokenizerTesterMixin).
    """

    _lowerCAmelCase = LEDTokenizer
    _lowerCAmelCase = LEDTokenizerFast
    _lowerCAmelCase = True
    # Write a tiny BPE vocab + merges to the temp dir used by the mixin.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        super().setUp()
        __a : str = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        __a : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __a : List[Any] = {"""unk_token""": """<unk>"""}
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowercase ) )
    # Factory for the slow tokenizer from the temp vocab.
    def lowerCAmelCase__(self , **_lowercase ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
    # Factory for the fast tokenizer from the temp vocab.
    def lowerCAmelCase__(self , **_lowercase ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
    # (input, expected) text pair used by the common tokenizer tests.
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        return "lower newer", "lower newer"
    # Pretrained slow/fast tokenizers for integration-style assertions.
    @cached_property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
    @cached_property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
    # Encoding with max_length padding yields the expected ids and shapes.
    @require_torch
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __a : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            __a : Dict = batch.input_ids.tolist()[0]
            self.assertListEqual(_lowercase , _lowercase )
    # Plain call returns input_ids/attention_mask but no label fields.
    @require_torch
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , _lowercase )
            self.assertIn("""attention_mask""" , _lowercase )
            self.assertNotIn("""labels""" , _lowercase )
            self.assertNotIn("""decoder_attention_mask""" , _lowercase )
    # Targets padded to a fixed max_length.
    @require_torch
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Optional[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Dict = tokenizer(text_target=_lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    # Very long input is truncated to the model max (5122 for LED-base).
    @require_torch
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
    # Source and target sequences both carry bos/eos special tokens.
    @require_torch
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Tuple = ["""A long paragraph for summarization."""]
        __a : Dict = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : int = tokenizer(_lowercase , return_tensors="""pt""" )
            __a : Dict = tokenizer(text_target=_lowercase , return_tensors="""pt""" )
            __a : List[str] = inputs["""input_ids"""]
            __a : List[Any] = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    # `pad` preserves a caller-supplied global_attention_mask.
    @require_torch
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[Any] = ["""Summary of the text.""", """Another summary."""]
            __a : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __a : Union[str, Any] = tokenizer(_lowercase , padding=_lowercase )
            __a : Tuple = [[0] * len(_lowercase ) for x in encoded_output["""input_ids"""]]
            __a : Union[str, Any] = tokenizer.pad(_lowercase )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , _lowercase )
    # Intentionally disabled common test.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        pass
    # Slow and fast tokenizers agree on special-token handling around <mask>.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = """A, <mask> AllenNLP sentence."""
                __a : Dict = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                __a : Tuple = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 63 | 1 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def __magic_name__(tf_checkpoint_path: str, albert_config_file: str, pytorch_dump_path: str):
    """Convert a TensorFlow ALBERT checkpoint into a PyTorch state dict on disk.

    BUG FIX: the original declared all three parameters with the same mangled
    name (a SyntaxError). Names/order are recovered from the argparse call site
    at the bottom of this file.
    """
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = AlbertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # BUG FIX: the parser/args were bound to a mangled throwaway name while the
    # code below referenced `parser`; rebound consistently here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--albert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained ALBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # NOTE(review): the converter above is currently bound to the mangled name
    # `__magic_name__`; call it by that name so the script runs.
    __magic_name__(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 63 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # BUG FIX: the parser and the converted pipeline were bound to mangled
    # throwaway names while later lines referenced `parser`/`controlnet`;
    # rebound consistently here. All help strings are kept byte-for-byte.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def __magic_name__(string: str) -> bool:
        """Parse an explicit "True"/"False" CLI value into a bool.

        BUG FIX: the original signature named the parameter `_lowerCamelCase`
        while the body read the undefined name `string`; the parameter is
        renamed to match the body (callers pass it positionally via
        `type=`). Raises ValueError for any other value so argparse reports
        a clear error.
        """
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    # NOTE(review): `parse_bool` was mangled to `__magic_name__` above; reference
    # it by its current name so the parser can be built.
    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=__magic_name__
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __magic_name__():
    """Entry point for the transformers-cli tool.

    Builds the argument parser, registers every CLI subcommand, then dispatches
    to the command selected on the command line.

    BUG FIX: the original discarded the parser, subparser, parsed args and the
    resulting service into the same throwaway local while later lines read the
    real names; rebound consistently here.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args, "func"):
        # No subcommand given: show usage and exit with an error status.
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 63 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__(DiffusionPipeline):
    """Minimal one-step pipeline: runs the UNet once and steps the scheduler once.

    BUG FIX: the original `__init__` declared both parameters with the same
    mangled name (a SyntaxError) and `__call__` referenced undefined `_lowercase`
    placeholders. The base class placeholder `__snake_case` is replaced with
    `DiffusionPipeline`, which is imported above and whose `register_modules`
    API this class uses.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # Random latent with the UNet's configured channel count and sample size.
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample
        # scheduler_output - scheduler_output is all zeros, so the result is a
        # ones tensor with the scheduler output's shape (keeps the graph wired
        # through both modules while producing a deterministic value).
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 63 | 1 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__(ProcessorMixin):
    """Bundles a ChineseCLIP image processor and a BERT tokenizer into one processor.

    NOTE(review): restored from mangled placeholders — the original `__init__` and
    `__call__` repeated one parameter name (a SyntaxError), all class attributes and
    all four methods shared a single name (shadowing each other), and the base class
    was the undefined `__snake_case` (ProcessorMixin is imported above for this
    role). Attribute and method names follow the ProcessorMixin contract.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        # Fall back to the deprecated argument when no image processor is given.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one must be given."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            # BUG FIX: the mangled code computed the pixel values but discarded
            # them into a throwaway local instead of attaching them here.
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 63 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration class for ViT-MSN models.

    BUG FIX: the original `__init__` declared every parameter with the same
    mangled name (a SyntaxError); the real names are recovered from the
    attribute each default is assigned to. The base placeholder `__snake_case`
    is replaced with `PretrainedConfig`, imported above for this role, and the
    mangled class attribute is restored to the conventional `model_type`.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Transformer encoder geometry
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        # Regularisation / initialisation
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Vision input layout
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __magic_name__():
    """Build a tiny three-file fixture dataset for the dedup tests.

    BUG FIX: the original passed the undefined name `_lowerCamelCase` to
    `Dataset.from_dict` (the dict literal had been bound to a throwaway local);
    the dict is now bound and passed under one name.
    """
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Tests for the minhash deduplication helpers."""

    # NOTE(review): both methods below share the mangled name `lowerCAmelCase__`,
    # so the second definition shadows the first at class-creation time. They also
    # reference `get_dataset`, `_lowercase` and `duplicate_clusters`, none of which
    # are defined under those names in this chunk (the fixture above is currently
    # named `__magic_name__`) — flagged here rather than renamed.
    def lowerCAmelCase__(self):
        '''Cluster the fixture at a 0.85 jaccard threshold; expect one 2-file cluster.'''
        __a : Optional[int] = get_dataset()
        __a : List[Any] = make_duplicate_clusters(_lowercase, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def lowerCAmelCase__(self):
        '''Deduplicate the fixture and check the reported cluster metadata.'''
        __a : Tuple = get_dataset()
        __a, __a : Optional[Any] = deduplicate_dataset(_lowercase)
        self.assertEqual(len(_lowercase), 2)
        print(_lowercase)
        self.assertEqual(duplicate_clusters[0][0]["""copies"""], 2)
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""], _lowercase)
| 63 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__(BertTokenizerFast):
    """Fast tokenizer for the DPR context encoder.

    NOTE(review): the base class was the undefined placeholder `__snake_case`;
    `BertTokenizerFast` is imported above precisely for this role. The class
    attributes all shared the mangled name `_lowerCAmelCase` (shadowing each
    other) and are restored to the standard fast-tokenizer hook names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__(BertTokenizerFast):
    """Fast tokenizer for the DPR question encoder.

    NOTE(review): the base class was the undefined placeholder `__snake_case`;
    `BertTokenizerFast` is imported above precisely for this role. The class
    attributes all shared the mangled name `_lowerCAmelCase` (shadowing each
    other) and are restored to the standard fast-tokenizer hook names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
lowercase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowercase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__:
    """Mixin adding DPR-reader encoding and best-span decoding to a tokenizer.

    NOTE(review): restored from mangled placeholders — `__call__` and the decode
    method repeated one parameter name (a SyntaxError), both span methods shared
    the name `lowerCAmelCase__` (the second shadowed the first while the first
    called the then-nonexistent `self._get_best_spans`), and the sort key lambda
    bound `_lowercase` but read `x` (a NameError). `__call__` parameter names are
    recovered from the module docstring constant above.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        # Plain question encoding when no passages are supplied.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: encode it as the pair text.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be shared across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            # BUG FIX: the mask was previously discarded into a throwaway local.
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def lowerCAmelCase__(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4):
        """Decode the best answer spans from reader logits, most relevant docs first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
                if len(nbest_spans_predictions) >= num_spans:
                    break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        """Return up to `top_spans` non-overlapping (start, end) index pairs by score."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda item: item[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip any candidate that overlaps an already-chosen span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__snake_case)
class SCREAMING_SNAKE_CASE__(__snake_case, __snake_case):
    # Fast DPR reader tokenizer: wires the reader vocab/config maps above into a
    # tokenizer class.
    # NOTE(review): the decorator argument and both base classes are the undefined
    # placeholder `__snake_case` (upstream these are the reader docstring constant,
    # the reader mixin above, and BertTokenizerFast) — left as-is because the exact
    # bindings are not recoverable from this chunk alone. All class attributes below
    # also share the mangled name `_lowerCAmelCase`, so each assignment shadows the
    # previous one.
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = ["input_ids", "attention_mask"]
    _lowerCAmelCase = DPRReaderTokenizer
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( _lowerCamelCase : list[int] ): # This function is recursive
__a : str = len(_lowerCamelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__a : Dict = array[0]
__a : Any = False
__a : Any = 1
__a : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
__a : str = True
__a : str = [element for element in array[i:] if element >= array[i]]
__a : Optional[int] = longest_subsequence(_lowerCamelCase )
if len(_lowerCamelCase ) > len(_lowerCamelCase ):
__a : Dict = temp_array
else:
i += 1
__a : int = [element for element in array[1:] if element >= pivot]
__a : List[str] = [pivot, *longest_subsequence(_lowerCamelCase )]
if len(_lowerCamelCase ) > len(_lowerCamelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 63 |
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[str] = len(grid[0] )
__a : int = len(_lowerCamelCase )
__a : Tuple = 0
__a : List[Any] = 0
__a : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
__a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__a : List[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__a : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__a : str = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
__a : Optional[Any] = max_product
return largest
def __magic_name__():
    """Read the grid from `grid.txt` next to this file and return the largest
    four-in-a-row product.

    BUG FIX: the original appended into an unbound `grid` name and passed the
    undefined `_lowerCamelCase` to `os.path.dirname`; locals are restored and
    the module path comes from `__file__`.
    """
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    # NOTE(review): `largest_product` is the intended name of the sibling function
    # above (currently also mangled to `__magic_name__`); this call only resolves
    # once that function is restored under its real name.
    return largest_product(grid)
if __name__ == "__main__":
    # BUG FIX: `solution` is undefined here — the driver above is currently bound
    # to the mangled name `__magic_name__` (the last definition wins), so call it
    # by that name.
    print(__magic_name__())
| 63 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
def __init__(self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=False , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ):
'''simple docstring'''
__a : str = parent
__a : Optional[int] = batch_size
__a : Union[str, Any] = seq_length
__a : Dict = is_training
__a : Optional[Any] = use_input_mask
__a : Optional[int] = use_token_type_ids
__a : int = use_labels
__a : Tuple = vocab_size
__a : Optional[int] = hidden_size
__a : Union[str, Any] = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : str = intermediate_size
__a : List[str] = hidden_act
__a : Optional[Any] = hidden_dropout_prob
__a : Optional[int] = attention_probs_dropout_prob
__a : Any = max_position_embeddings
__a : str = type_vocab_size
__a : Optional[Any] = type_sequence_label_size
__a : Optional[int] = initializer_range
__a : Union[str, Any] = num_labels
__a : List[str] = num_choices
__a : List[str] = scope
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : List[str] = None
if self.use_input_mask:
__a : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__a : Any = None
__a : Optional[Any] = None
__a : Optional[Any] = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__a : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__(self ):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : List[str] = DistilBertModel(config=_lowercase )
model.to(_lowercase )
model.eval()
__a : List[Any] = model(_lowercase , _lowercase )
__a : List[Any] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : List[Any] = DistilBertForMaskedLM(config=_lowercase )
model.to(_lowercase )
model.eval()
__a : Optional[int] = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Any = DistilBertForQuestionAnswering(config=_lowercase )
model.to(_lowercase )
model.eval()
__a : Tuple = model(
_lowercase , attention_mask=_lowercase , start_positions=_lowercase , end_positions=_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Optional[Any] = self.num_labels
__a : Dict = DistilBertForSequenceClassification(_lowercase )
model.to(_lowercase )
model.eval()
__a : Any = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Union[str, Any] = self.num_labels
__a : List[Any] = DistilBertForTokenClassification(config=_lowercase )
model.to(_lowercase )
model.eval()
__a : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
    """Build DistilBertForMultipleChoice and shape-check its per-choice logits.

    NOTE(review): the original signature repeated the placeholder name
    ``_lowercase`` for all six parameters (a SyntaxError). Names and wiring
    are reconstructed from the standard model-tester convention — confirm.
    """
    num_choices = self.num_choices
    model = DistilBertForMultipleChoice(config=config )
    model.to(torch_device )  # torch_device assumed imported at module top — TODO confirm
    model.eval()
    # Tile ids/mask to (batch, num_choices, seq_len) for the multiple-choice head.
    multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    result = model(
        multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , labels=choice_labels , )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__(self ):
    """Split the prepared config/inputs into ``(config, inputs_dict)`` for the
    common model tests.

    NOTE(review): the original collapsed all six unpacking targets onto one
    renamed placeholder and then read ``input_ids``/``input_mask`` which were
    never bound. The unpack order below follows the tester convention
    (config, input_ids, input_mask, sequence_labels, token_labels,
    choice_labels) — confirm against ``prepare_config_and_inputs``.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
    inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
    return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
"""Common-suite and pipeline tests for the DistilBert model family."""
# NOTE(review): this block was mangled by a renaming pass — the two mixin
# base classes are obfuscated aliases, every class attribute shares the name
# ``_lowerCAmelCase`` (later assignments clobber earlier ones), every test
# method is named ``lowerCAmelCase__`` (later defs shadow earlier ones), and
# ``_lowercase`` placeholders stand in for real arguments. The comments
# below record the apparent intent; confirm against the upstream DistilBert
# test module before relying on them.
# All model classes exercised by the common test suite (None without torch).
_lowerCAmelCase = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
# Pipeline-task -> model-class mapping consumed by the pipeline mixin.
_lowerCAmelCase = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
# Four common-suite feature flags (presumably fx_compatible / test_pruning /
# test_resize_embeddings / test_resize_position_embeddings — TODO confirm).
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = True
_lowerCAmelCase = True
def lowerCAmelCase__(self ):
"""Set up the model tester and the config tester (hidden dim 37)."""
__a : List[Any] = DistilBertModelTester(self )
__a : Tuple = ConfigTester(self , config_class=_lowercase , dim=37 )
def lowerCAmelCase__(self ):
"""Run the shared DistilBertConfig serialization/round-trip checks."""
self.config_tester.run_common_tests()
def lowerCAmelCase__(self ):
"""Forward-pass shape check for the base DistilBertModel."""
__a : str = self.model_tester.prepare_config_and_inputs()
# NOTE(review): the tester's methods were renamed to ``lowerCAmelCase__``,
# so ``create_and_check_distilbert_model`` (and the variants below) do not
# exist under these names as written.
self.model_tester.create_and_check_distilbert_model(*_lowercase )
def lowerCAmelCase__(self ):
"""Masked-LM head shape check."""
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*_lowercase )
def lowerCAmelCase__(self ):
"""Question-answering head shape check."""
__a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*_lowercase )
def lowerCAmelCase__(self ):
"""Sequence-classification head shape check."""
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*_lowercase )
def lowerCAmelCase__(self ):
"""Token-classification head shape check."""
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*_lowercase )
def lowerCAmelCase__(self ):
"""Multiple-choice head shape check."""
__a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*_lowercase )
@slow
def lowerCAmelCase__(self ):
"""Smoke-test loading the first published pretrained checkpoint."""
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Any = DistilBertModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
@slow
@require_torch_gpu
def lowerCAmelCase__(self ):
"""Trace each model with TorchScript, save/reload it, and re-run it."""
# NOTE(review): both unpack targets below are the same renamed name, so the
# config is lost; presumably (config, inputs_dict) upstream — confirm.
__a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__a : List[str] = True
__a : Union[str, Any] = model_class(config=_lowercase )
__a : List[Any] = self._prepare_for_class(_lowercase , _lowercase )
# Trace on CPU so the artifact is device-independent before reloading.
__a : Optional[int] = torch.jit.trace(
_lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_lowercase , os.path.join(_lowercase , """traced_model.pt""" ) )
__a : Any = torch.jit.load(os.path.join(_lowercase , """traced_model.pt""" ) , map_location=_lowercase )
loaded(inputs_dict["""input_ids"""].to(_lowercase ) , inputs_dict["""attention_mask"""].to(_lowercase ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run the pretrained distilbert-base-uncased
    checkpoint on a fixed input and compare against recorded activations."""

    @slow
    def lowerCAmelCase__(self ):
        model = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            # Fix: the original called model(_lowercase, attention_mask=_lowercase)
            # with an undefined placeholder; wire in the tensors built above.
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
        # Spot-check a 3x3 window of the hidden states to 4 decimal places.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
# Output container for the semantic Stable Diffusion pipeline; the base
# class is the obfuscated ``BaseOutput`` alias imported above.
# NOTE(review): both fields were reduced to the placeholder ``42`` by the
# renaming pass — upstream they are type annotations (the generated images
# and the NSFW-content flags) — confirm against the original pipeline output.
_lowerCAmelCase = 42
_lowerCAmelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
"""simple docstring"""
from collections import deque
def __magic_name__ ( _lowerCamelCase : list ):
    """Return the strongly connected components of a directed graph.

    Tarjan's algorithm: ``_lowerCamelCase`` is an adjacency list (one list of
    neighbour indices per vertex). The result is a list of components, each a
    list of vertex indices, emitted in reverse topological order.

    Fixes from review: the inner helper repeated one placeholder parameter
    name (a SyntaxError) and iterated an undefined ``g``; both are restored.
    """
    n = len(_lowerCamelCase )
    stack = deque()
    on_stack = [False for _ in range(n )]
    index_of = [-1 for _ in range(n )]     # discovery order, -1 = unvisited
    lowlink_of = index_of[:]               # lowest index reachable from each vertex

    def strong_connect(v , index , components ):
        # Assign discovery index and provisional low-link, then DFS from v.
        index_of[v] = index
        lowlink_of[v] = index
        index += 1
        stack.append(v )
        on_stack[v] = True
        for w in _lowerCamelCase[v]:
            if index_of[w] == -1:
                index = strong_connect(w , index , components )
                lowlink_of[v] = min(lowlink_of[w] , lowlink_of[v] )
            elif on_stack[w]:
                # Back-edge to a vertex still on the stack.
                lowlink_of[v] = min(lowlink_of[w] , lowlink_of[v] )
        if lowlink_of[v] == index_of[v]:
            # v is the root of a component: pop the stack down to (and incl.) v.
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w )
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w )
            components.append(component )
        return index

    components = []
    for v in range(n ):
        if index_of[v] == -1:
            # Restarting the counter at 0 is safe: finished vertices are only
            # ever tested via index_of[w] == -1 / on_stack[w].
            strong_connect(v , 0 , components )
    return components


# Fix: the __main__ smoke test below calls ``tarjan``, but the renaming pass
# left this function as ``__magic_name__`` — expose it under the expected name.
tarjan = __magic_name__
def __magic_name__ ( n_vertices : int , edges : list ):
    """Build an adjacency-list digraph from ``(u, v)`` edge pairs.

    Fixes from review: the original signature repeated the placeholder name
    ``_lowerCamelCase`` for both parameters (a SyntaxError); the distinct
    names are taken from the call in the __main__ block below.
    """
    g = [[] for _ in range(n_vertices )]
    for u, v in edges:
        g[u].append(v )
    return g


# Fix: the __main__ smoke test below calls ``create_graph`` — restore that name.
create_graph = __magic_name__
if __name__ == "__main__":
# Test
# Smoke test: 7-vertex digraph whose SCCs are {5}, {6}, {4} and {0,1,2,3}.
lowercase__ = 7
lowercase__ = [0, 0, 1, 2, 3, 3, 4, 4, 6]
lowercase__ = [1, 3, 2, 0, 1, 4, 5, 6, 5]
# NOTE(review): the renaming pass collapsed n_vertices/source/target/edges/g
# onto the single name ``lowercase__`` while the expressions below still read
# the original names — restore distinct bindings for this block to run.
lowercase__ = [(u, v) for u, v in zip(source, target)]
lowercase__ = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 63 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# (Document-layout models take images + word boxes, not plain text; the test
# class below filters them out of the task mappings. NOTE(review): the class
# reads this set as ``_TO_SKIP``, a name the renaming pass removed.)
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""Pipeline tests for the text-classification task (PyTorch and TensorFlow)."""
# NOTE(review): this block was mangled by a renaming pass — ``model_mapping``,
# ``tf_model_mapping`` and ``_TO_SKIP`` below are undefined under those names
# (their assignments were renamed to the obfuscated aliases), every test
# method shares the name ``lowerCAmelCase__`` so later defs shadow earlier
# ones, and ``_lowercase`` placeholders stand in for real arguments.
# ``idalabel`` is presumably a mangling of ``id2label``. Comments document
# the apparent intent; confirm against the upstream pipeline test module.
_lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
# Drop layout-model configs, which need image+box inputs rather than text.
if model_mapping is not None:
_lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase__(self ):
"""Tiny PT model: single/top_k/batched calls plus the legacy return_all_scores path."""
__a : int = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
__a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : List[str] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
# (return_all_scores=False/True — the booleans were lost to ``_lowercase``.)
__a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
__a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def lowerCAmelCase__(self ):
"""Tiny PT model pinned to CPU via an explicit torch.device."""
import torch
__a : Any = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__a : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def lowerCAmelCase__(self ):
"""Tiny TF model: single-input smoke test."""
__a : List[Any] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
__a : List[str] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def lowerCAmelCase__(self ):
"""Default PT checkpoint: sentiment polarity sanity checks."""
__a : Tuple = pipeline("""text-classification""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Optional[int] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def lowerCAmelCase__(self ):
"""Default TF checkpoint: same sentiment polarity sanity checks."""
__a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
__a : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Tuple = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : str = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
"""Factory hook for the pipeline mixin: build the pipeline + sample inputs.
NOTE(review): the three parameters share one name here — a SyntaxError;
presumably (model, tokenizer, processor) upstream."""
__a : Dict = TextClassificationPipeline(model=_lowercase , tokenizer=_lowercase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase__(self , _lowercase , _lowercase ):
"""Mixin hook: exercise single/batched/top_k/text-pair call shapes.
NOTE(review): duplicated parameter name — a SyntaxError; presumably
(text_classifier, _) upstream."""
__a : List[str] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__a : Union[str, Any] = """HuggingFace is in"""
__a : List[str] = text_classifier(_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
__a : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
__a : Dict = text_classifier(_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}, {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__a : Dict = text_classifier(_lowercase , top_k=_lowercase )
__a : Dict = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N, [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N] , )
__a : Dict = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
__a : Any = text_classifier(_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__a : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(_lowercase ):
text_classifier(_lowercase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__a : Optional[int] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int = 1_0_0_0 ):
    """Return the sum of the decimal digits of ``2 ** _lowerCamelCase``.

    Project Euler problem 16; the default exponent of 1000 reproduces the
    original puzzle (answer 1366).
    """
    # Render the big integer once and sum its digits — replaces the original
    # manual divmod loop with the idiomatic one-liner.
    return sum(int(digit ) for digit in str(2**_lowerCamelCase ) )


# Fix: the __main__ guard below calls ``solution``, a name the renaming pass
# removed — expose the helper under that name as well.
solution = __magic_name__
if __name__ == "__main__":
    # Read the exponent from stdin and print the digit sum. input() already
    # returns a str, so the original str(...) wrapper was redundant, and the
    # call is wired to the helper actually defined above.
    print(__magic_name__(int(input().strip())))
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for the 0/1 knapsack solver ``knapsack.knapsack``.

    NOTE(review): the renaming pass collapsed every argument of the
    ``k.knapsack`` calls to an undefined ``_lowercase`` placeholder and gave
    all three tests the same name (so only the last would be collected).
    Arguments and method names below are reconstructed from the upstream
    ``knapsack(capacity, weights, values, counter)`` signature — confirm.
    """

    def test_base_case(self ):
        """Zero capacity can never yield a positive profit."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

        val = [60]
        w = [10]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 0 )

    def test_easy_case(self ):
        """Small instance: best subset of values {1, 2, 3} under capacity 3 is 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 5 )

    def test_knapsack(self ):
        """Classic instance with optimum 220 (take the 100- and 120-value items)."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val )
        self.assertEqual(k.knapsack(cap , w , val , c ) , 220 )
if __name__ == "__main__":
# Allow running this test module directly with the unittest CLI runner.
unittest.main()
| 63 | 1 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
# Path to the per-language-pair src/tgt validation sentences used below.
lowercase__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
# NOTE(review): the renaming pass collapsed the distinct module constants
# (presumably ``filename`` and ``bleu_data``) onto ``lowercase__`` — the
# ``filename`` read here and the ``bleu_data`` read inside the tests are
# undefined as written; restore distinct names.
with io.open(filename, "r", encoding="utf-8") as f:
lowercase__ = json.load(f)
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow BLEU regression tests for the facebook/wmt19-* FSMT checkpoints.

    NOTE(review): method and parameter names are reconstructed — the renaming
    pass had left both helpers named ``lowerCAmelCase__`` while the test body
    calls ``self.get_tokenizer``/``self.get_model``, and it duplicated the
    placeholder parameter name (a SyntaxError) while the body reads ``pair``.
    The boolean tokenizer/decoding flags were also lost to placeholders and
    follow upstream conventions — confirm.
    """

    def get_tokenizer(self , _lowercase ):
        """FSMT tokenizer for the given checkpoint id."""
        return FSMTTokenizer.from_pretrained(_lowercase )

    def get_model(self , _lowercase ):
        """FSMT model for the checkpoint, moved to the test device."""
        model = FSMTForConditionalGeneration.from_pretrained(_lowercase ).to(torch_device )
        if torch_device == "cuda":
            # Halve GPU memory; fp16 generation is only supported on CUDA.
            model.half()
        return model

    @parameterized.expand(
        [
            ["""en-ru""", 26.0],
            ["""ru-en""", 22.0],
            ["""en-de""", 22.0],
            ["""de-en""", 29.0],
        ] )
    @slow
    def lowerCAmelCase__(self , pair , min_bleu_score ):
        """Translate the validation set for ``pair`` and require a BLEU floor."""
        mname = F'''facebook/wmt19-{pair}'''
        tokenizer = self.get_tokenizer(mname )
        model = self.get_model(mname )
        # NOTE(review): ``bleu_data`` is the module-level JSON load above,
        # which the renaming pass also mangled — confirm it is restored.
        src_sentences = bleu_data[pair]["""src"""]
        tgt_sentences = bleu_data[pair]["""tgt"""]
        batch = tokenizer(src_sentences , return_tensors="""pt""" , truncation=True , padding="""longest""" ).to(torch_device )
        outputs = model.generate(
            input_ids=batch.input_ids , num_beams=8 , )
        decoded = tokenizer.batch_decode(
            outputs , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        scores = calculate_bleu(decoded , tgt_sentences )
        print(scores )
        self.assertGreaterEqual(scores["""bleu"""] , min_bleu_score )
| 63 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
# NOTE(review): renaming-pass damage — the base class is presumably
# ``manim.Scene`` (confirm), and every ``_lowercase`` placeholder below
# replaced a real argument (layout direction constants such as RIGHT/DOWN,
# colors, mobject groups), so the scene cannot render as written. Comments
# record the apparent intent of each section.
def lowerCAmelCase__(self ):
"""Build and animate the 'checkpoint weights spill to disk' visualisation."""
# Unit squares used as memory cells; the third rect is a translucent fill.
__a : List[str] = Rectangle(height=0.5 , width=0.5 )
__a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
__a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
# CPU block: two 6-cell columns plus a caption, placed left of centre.
__a : Dict = [mem.copy() for i in range(6 )]
__a : str = [mem.copy() for i in range(6 )]
__a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
__a : Union[str, Any] = Text("""CPU""" , font_size=24 )
__a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowercase )
# GPU block: a single 4-cell row.
__a : Optional[Any] = [mem.copy() for i in range(4 )]
__a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[str] = Text("""GPU""" , font_size=24 )
__a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
gpu.move_to([-1, -1, 0] )
self.add(_lowercase )
# Model block: 6 cells on the right.
__a : List[Any] = [mem.copy() for i in range(6 )]
__a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Optional[Any] = Text("""Model""" , font_size=24 )
__a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
model.move_to([3, -1.0, 0] )
self.add(_lowercase )
# Overlay small target rectangles on the CPU cells for each model shard.
__a : Tuple = []
__a : Tuple = []
__a : Optional[int] = []
for i, rect in enumerate(_lowercase ):
rect.set_stroke(_lowercase )
__a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
self.add(_lowercase )
model_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase , *_lowercase )
# Loaded-checkpoint block above the model.
__a : Optional[Any] = [mem.copy() for i in range(6 )]
__a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
__a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
checkpoint.move_to([3, 0.5, 0] )
self.add(_lowercase )
# Fill overlays for the checkpoint cells, mirrored onto the CPU columns.
__a : Dict = []
__a : int = []
for i, rect in enumerate(_lowercase ):
__a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
target.move_to(_lowercase )
ckpt_arr.append(_lowercase )
__a : Union[str, Any] = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_lowercase )
self.add(*_lowercase , *_lowercase )
# Legend explaining the colour coding.
__a : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a : List[Any] = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowercase , _lowercase )
__a : str = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_lowercase )
# Step 1 caption + disk block, then animate weights moving onto disk.
__a : Optional[int] = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
__a : List[Any] = [meta_mem.copy() for i in range(6 )]
__a : Optional[int] = [meta_mem.copy() for i in range(6 )]
__a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
__a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
__a : Dict = Text("""Disk""" , font_size=24 )
__a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
__a : Optional[Any] = []
for i, rect in enumerate(_lowercase ):
__a : List[str] = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
self.play(*_lowercase )
self.play(FadeOut(_lowercase ) )
# Step 2 caption, then fade everything out.
__a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowercase , run_time=3 ) )
self.play(
FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
self.wait()
| 63 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds RegNet configs/inputs and runs shape checks for the model tests.

    NOTE(review): the renaming pass duplicated the placeholder parameter name
    in every signature (a SyntaxError) and collapsed all methods onto
    ``lowerCAmelCase__``. Parameter names are reconstructed from the attribute
    assignments in the original body, and method names from the call sites in
    the test class below — confirm against the upstream RegNet tester.
    """

    def __init__(self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        # NOTE: the mutable list defaults mirror the obfuscated original; they
        # are never mutated here.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes (== len(depths) here) —
        # presumably len(hidden_sizes) upstream; TODO confirm.
        self.num_stages = len(hidden_sizes )

    def prepare_config_and_inputs(self ):
        """Random pixel inputs (+ labels when use_labels) and a fresh config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self ):
        """RegNetConfig populated from the tester's hyper-parameters."""
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )

    def create_and_check_model(self , config , pixel_values , labels ):
        """Forward through RegNetModel; the last feature map is downsampled 32x."""
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        """Forward through the classification head and shape-check its logits."""
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self ):
        """Split prepared config/inputs into (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
"""Common-suite and pipeline tests for RegNet (vision model, no text inputs)."""
# NOTE(review): renaming-pass damage — the mixin base classes are obfuscated
# aliases, all class attributes share the name ``_lowerCAmelCase`` (later
# assignments clobber earlier ones), every test method is named
# ``lowerCAmelCase__`` (later defs shadow earlier ones), and ``_lowercase``
# placeholders stand in for real arguments. Comments record the apparent
# intent; confirm against the upstream RegNet test module.
_lowerCAmelCase = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
_lowerCAmelCase = (
{"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
if is_torch_available()
else {}
)
# Four common-suite feature flags, all disabled (presumably fx_compatible /
# test_pruning / test_resize_embeddings / test_head_masking — TODO confirm).
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
"""Set up the model tester and a text-free config tester."""
__a : str = RegNetModelTester(self )
__a : Any = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def lowerCAmelCase__(self ):
"""Run the full battery of RegNetConfig serialization checks."""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__(self ):
"""Placeholder for the config common-properties hook (intentionally empty)."""
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
"""Skipped: convnets take pixel values, not embedding inputs."""
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def lowerCAmelCase__(self ):
"""Skipped: no token embedding tables to get/set."""
pass
def lowerCAmelCase__(self ):
"""The forward signature's first argument must be ``pixel_values``."""
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(_lowercase )
__a : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : List[Any] = [*signature.parameters.keys()]
__a : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase__(self ):
"""Base-model forward shape check via the tester."""
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowerCAmelCase__(self ):
"""All norm layers must initialise to weight 1 / bias 0."""
__a , __a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(config=_lowercase )
for name, module in model.named_modules():
if isinstance(_lowercase , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowerCAmelCase__(self ):
"""hidden_states output: one entry per stage (+1), first map at H/2 x W/2."""
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
# NOTE(review): duplicated parameter name here is a SyntaxError;
# presumably (inputs_dict, config, model_class) upstream.
__a : str = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
__a : Optional[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
__a : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : Dict = self.model_tester.num_stages
self.assertEqual(len(_lowercase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__a : Any = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__a : int = layer_type
__a : List[str] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self ):
"""Classification-head forward shape check via the tester."""
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
"""Smoke-test loading the first published pretrained checkpoint."""
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : str = RegNetModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def __magic_name__ ( ):
    """Load the standard COCO two-cats fixture image used by the slow test."""
    # Fix: the original bound the image to a renamed local but returned the
    # stale name ``image`` (a NameError at call time).
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )


# Fix: the integration test below calls ``prepare_img`` — restore that name.
prepare_img = __magic_name__
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow end-to-end check of the first pretrained RegNet checkpoint."""

    @cached_property
    def default_image_processor(self ):
        # NOTE(review): restored name — the test below reads
        # ``self.default_image_processor``, which the renaming pass had
        # collapsed to ``lowerCAmelCase__``.
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCAmelCase__(self ):
        """Classify the fixture image and compare logits against recorded values."""
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        # Fix: the original passed ``_lowercase`` placeholders; wire in the
        # image, the processed inputs and the test device.
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 63 |
"""simple docstring"""
def __magic_name__ ( nfactor : int , moles : float , volume : float ):
    """Convert molarity to normality (rounded): N = (moles / volume) * n-factor.

    NOTE(review): the original signature repeated a placeholder name (a
    SyntaxError); the names come from the body and their order follows the
    upstream ``molarity_to_normality(nfactor, moles, volume)`` — confirm.
    """
    return round(float(moles / volume ) * nfactor )
def __magic_name__ ( volume : float , moles : float , temperature : float ):
    """Ideal-gas pressure (rounded): P = nRT / V, with R = 0.0821 L·atm/(mol·K).

    NOTE(review): the original signature repeated a placeholder name (a
    SyntaxError); names come from the body, order follows the upstream
    ``moles_to_pressure(volume, moles, temperature)`` — confirm.
    """
    return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __magic_name__ ( pressure : float , moles : float , temperature : float ):
    """Ideal-gas volume (rounded): V = nRT / P, with R = 0.0821 L·atm/(mol·K).

    NOTE(review): the original signature repeated a placeholder name (a
    SyntaxError); names come from the body, order follows the upstream
    ``moles_to_volume(pressure, moles, temperature)`` — confirm.
    """
    return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __magic_name__ ( pressure : float , moles : float , volume : float ):
    """Ideal-gas temperature (rounded): T = PV / (nR), R = 0.0821 L·atm/(mol·K).

    NOTE(review): the original signature repeated a placeholder name (a
    SyntaxError); names come from the body, order follows the upstream
    ``pressure_and_volume_to_temperature(pressure, moles, volume)`` — confirm.
    """
    return round(float((pressure * volume) / (0.08_21 * moles) ) )
if __name__ == "__main__":
import doctest
# NOTE(review): the helpers above contain no doctest examples as written,
# so this self-check is currently a no-op.
doctest.testmod()
| 63 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the small SentencePiece model shipped with the test fixtures.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp

# Language-code token ids used by the multilingual tests below.
# Bug fix: the test bodies reference FR_CODE / ES_CODE, but an automated
# rename had collapsed both constants to the same name `lowercase__`.
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Tokenizer test-suite for SpeechaTextTokenizer (SentencePiece-based).

    NOTE(review): an automated rename has mangled this class — every test
    method shares the name `lowerCAmelCase__` (so earlier definitions are
    shadowed and none follow unittest's `test_*` discovery convention), and
    several bodies reference `_lowercase` / names such as `spm_model`,
    `vocab`, `save_dir`, `tokenizer` that were detached from their
    assignments (`__a`). Comments below describe the evident intent; verify
    against the upstream transformers test file before relying on them.
    """

    _lowerCAmelCase = SpeechaTextTokenizer
    # NOTE(review): the same class attribute is rebound three times below —
    # upstream these are presumably distinct tester flags.
    _lowerCAmelCase = False
    _lowerCAmelCase = True

    def lowerCAmelCase__(self ):
        """setUp: build a tiny SentencePiece vocab, save it, and persist a tokenizer."""
        super().setUp()
        __a : Dict = sp.SentencePieceProcessor()
        spm_model.Load(_lowercase )
        __a : int = ["""<s>""", """<pad>""", """</s>""", """<unk>"""]
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_lowercase ) )]
        __a : str = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Union[str, Any] = Path(self.tmpdirname )
        save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["""vocab_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["""spm_file"""] )
        __a : Dict = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )

    def lowerCAmelCase__(self ):
        """Check the <pad> token converts to id 1 and back."""
        __a : Optional[int] = """<pad>"""
        __a : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )

    def lowerCAmelCase__(self ):
        """Check the first/last vocab entries and the total vocab size (1001)."""
        __a : str = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """j""" )
        self.assertEqual(len(_lowercase ) , 1001 )

    def lowerCAmelCase__(self ):
        """vocab_size property must match the fixture model."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1001 )

    def lowerCAmelCase__(self ):
        """Full tokenize / ids / detokenize round-trip on two sample sentences."""
        __a : Union[str, Any] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        __a : List[Any] = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(_lowercase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(_lowercase ) , [289, 50, 14, 174, 386] , )
        __a : Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            _lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
        __a : Optional[Any] = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(_lowercase , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
        # ids -> tokens maps out-of-vocab pieces back to <unk>
        __a : Optional[Any] = tokenizer.convert_ids_to_tokens(_lowercase )
        self.assertListEqual(
            _lowercase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )

    @slow
    def lowerCAmelCase__(self ):
        """Integration check against a fixed encoding of the hosted s2t checkpoint."""
        # fmt: off  (machine-generated expected encoding — do not hand-edit)
        __a : Any = {"""input_ids""": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Multilingual Speech2Text tokenizer checks against a hosted checkpoint.

    NOTE(review): as in the class above, every test method is named
    `lowerCAmelCase__` (mutual shadowing; not collected by unittest), and
    bodies reference `_lowercase` / detached names (`generated_ids`,
    `encoded`) — artifacts of an automated rename; verify upstream.
    """

    _lowerCAmelCase = "valhalla/s2t_mustc_multilinguial_medium"
    # NOTE(review): the same attribute is rebound — upstream these are
    # presumably `checkpoint_name`, `french_text`, `spanish_text`.
    _lowerCAmelCase = "C'est trop cool"
    _lowerCAmelCase = "Esto es genial"

    @classmethod
    def lowerCAmelCase__(cls ):
        """setUpClass: load the pretrained tokenizer once for all tests."""
        __a : SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls

    def lowerCAmelCase__(self ):
        """Language codes must map to their fixed token ids."""
        self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
        self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )

    def lowerCAmelCase__(self ):
        """The multilingual checkpoint has a 10000-piece vocab."""
        self.assertEqual(self.tokenizer.vocab_size , 10000 )

    def lowerCAmelCase__(self ):
        """Decoding with skip_special_tokens must drop language/eos tokens."""
        self.assertIn(_lowercase , self.tokenizer.all_special_ids )
        __a : List[Any] = [ES_CODE, 4, 1601, 47, 7647, 2]
        __a : int = self.tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
        __a : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowercase )
        self.assertEqual(_lowercase , _lowercase )
        self.assertNotIn(self.tokenizer.eos_token , _lowercase )

    def lowerCAmelCase__(self ):
        """Encoding French must start with the language code and end with eos."""
        __a : List[str] = """fr"""
        __a : List[str] = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , _lowercase )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )

    def lowerCAmelCase__(self ):
        """Switching tgt_lang must update the prefix tokens accordingly."""
        __a : Union[str, Any] = """fr"""
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        __a : int = """es"""
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 63 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    """A node of a disjoint-set forest: payload, parent link and rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # every node starts as the root of its own set
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Union-find with path compression and union by rank.

    Reconstruction note: the three classes in this module were all renamed to
    the same identifier by an automated pass while their bodies kept calling
    `DisjointSetTreeNode` / `DisjointSetTree` / `GraphUndirectedWeighted`;
    the canonical names the code itself uses are restored here.
    """

    def __init__(self) -> None:
        # payload -> its tree node
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        """Create a new singleton set containing ``data``."""
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        """Return the representative of ``data``'s set, compressing the path."""
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(
        self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]
    ) -> None:
        """Attach the lower-rank root beneath the higher-rank one."""
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        """Merge the sets containing ``data1`` and ``data2``."""
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph supporting Kruskal's MST algorithm."""

    def __init__(self) -> None:
        # adjacency map: node -> {neighbour: edge weight}
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        """Register ``node`` with no edges if it is not present yet."""
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        """Add an undirected edge of the given weight between the two nodes."""
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        """Return the minimum spanning tree as a new graph (Kruskal's algorithm)."""
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))  # record the reverse so each edge is kept once
                    edges.append((start, end, self.connections[start][end]))
        # Bug fix: the sort key previously referenced an undefined name.
        edges.sort(key=lambda edge: edge[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: take cheapest edges that join two different components
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
| 63 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply a zero-mean Gaussian of the given variance elementwise to ``img``.

    Reconstruction note: the original signatures repeated one parameter name
    (a SyntaxError) and the functions were all renamed to the same identifier
    while their call sites kept the canonical names used here.
    """
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square window of side ``kernel_size`` centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create a spatial Gaussian kernel of the given size and variance."""
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            # Euclidean distance from the kernel centre.
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Edge-preserving bilateral filter: weights combine spatial distance and
    intensity difference. Border pixels (within kernel_size//2) stay zero."""
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # intensity difference w.r.t. the window centre
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            img2[i, j] = np.sum(vals) / np.sum(weights)
    return img2


def parse_args(args: list) -> tuple:
    """Parse (filename, spatial_variance, intensity_variance, kernel_size)
    from argv, with defaults; the kernel size is forced to be odd."""
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # make odd
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    # Bug fix: `np.uinta` does not exist — the cast back to 8-bit is np.uint8.
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 1 |
"""simple docstring"""
from collections import namedtuple
lowercase__ = namedtuple("from_to", "from_ to")
lowercase__ = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1000),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.0_0454, 264.172),
"cubicyard": from_to(0.7_6455, 1.3_0795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.0_0023_6588, 4226.75),
}
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : str , _lowerCamelCase : str ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ """, """.join(_lowerCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ """, """.join(_lowerCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __magic_name__():
    """Build a tiny in-memory dataset with two near-duplicate files and one
    distinct file, for exercising the minhash deduplication helpers.

    Bug fix: the dict was assigned to a throwaway name while an undefined
    name was passed to ``Dataset.from_dict``; the local is now used.
    """
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 2_0, "a " * 3_0, "b " * 7],
    }
    return Dataset.from_dict(data)


class SCREAMING_SNAKE_CASE__(TestCase):
    """Tests for minhash_deduplication.

    Reconstruction note: the two test methods previously shared one
    non-`test_*` name (never collected, mutually shadowing) and referenced
    undefined locals; both are repaired with the evident intent.
    """

    def test_make_duplicate_clusters(self):
        """The two `"a "*n` files must land in one duplicate cluster."""
        ds = __magic_name__()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        """Deduplication keeps 2 rows and marks the kept duplicate as extreme."""
        ds = __magic_name__()
        ds_dedup, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_dedup), 2)
        print(ds_dedup)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from ``position`` that stay on an n x n board.

    Reconstruction note: the original signatures repeated one parameter name
    (a SyntaxError) while the call sites already used these canonical names.
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """True when every square has been visited (no zeros remain)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(
    board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
    """Backtracking step: try to extend the tour from ``pos``; ``curr`` is the
    move number already placed at ``pos``. Returns True on success."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # undo the move on backtrack
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board.

    Returns:
        A board whose entries are the 1-based visit order.

    Raises:
        ValueError: if no open tour exists for this board size.
    """
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    # Bug fix: corrected the "Kight" typo in the error message.
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import scaffolding for the Ernie sub-package: the configuration is
# always importable; the PyTorch models are registered only when torch is
# available.  Bug fix: an automated rename had collapsed `_import_structure`
# (referenced by the `_LazyModule` call below) and the `sys.modules` patch
# into plain assignments to `lowercase__`.
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports run on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Comparison operators accepted in pip-style requirement strings.
# (Referenced as `ops` by the functions below; an automated rename had
# collapsed the assignment to `lowercase__`.)
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}


def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare an installed version against a wanted one; raise on mismatch.

    Bug fix: the original signature repeated one parameter name six times,
    which is a SyntaxError; the six distinct parameters are restored.

    Raises:
        ValueError: if either version string is None (cannot compare).
        ImportError: if the comparison fails.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that a pip-style requirement (e.g. ``tokenizers==0.9.4``) is met.

    The special package name ``python`` is checked against the interpreter
    version instead of installed distributions.

    Raises:
        ValueError: on a malformed requirement string.
        importlib.metadata.PackageNotFoundError: if the package is absent.
        ImportError: if the installed version does not satisfy the spec.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # Robustness fix: initialise `wanted` so a bare, non-versioned "python"
    # requirement does not hit an undefined name below.
    wanted: dict = {}

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: compare against the running interpreter
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version/range was given
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def __magic_name__(requirement: str) -> None:
    """require_version wrapper that adds a core-installation hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 63 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SchedulerType(Enum):
    """Named learning-rate schedules supported by :func:`get_scheduler`.

    Reconstruction note: all functions below (and this enum) were renamed to
    a single identifier by an automated pass, with signatures repeating one
    parameter name (a SyntaxError), while `TYPE_TO_SCHEDULER_FUNCTION` and
    `get_scheduler` kept referencing the canonical names restored here.
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate (multiplier 1 at every step)."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup from 0 to the base LR, then constant."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(
    optimizer: Optimizer, step_rules: str, last_epoch: int = -1
) -> LambdaLR:
    """Piecewise-constant multiplier from a rule string like "1:10,0.1:20,0.01".

    Each ``value:steps`` pair applies ``value`` until ``steps``; the trailing
    bare number is the multiplier used beyond the last boundary.
    """
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup, then linear decay to 0 at ``num_training_steps``."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0,
            float(num_training_steps - current_step)
            / float(max(1, num_training_steps - num_warmup_steps)),
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: float = 0.5,
    last_epoch: int = -1,
) -> LambdaLR:
    """Linear warmup, then cosine decay over ``num_cycles`` half-waves."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(
            max(1, num_training_steps - num_warmup_steps)
        )
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    num_cycles: int = 1,
    last_epoch: int = -1,
) -> LambdaLR:
    """Linear warmup, then cosine decay with ``num_cycles`` hard restarts."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(
            max(1, num_training_steps - num_warmup_steps)
        )
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    lr_end: float = 1e-7,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Linear warmup, then polynomial decay from the base LR down to ``lr_end``."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Unified factory: build the named schedule with the given parameters.

    Raises:
        ValueError: if a schedule requires ``num_warmup_steps`` or
            ``num_training_steps`` and it was not provided.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=num_training_steps,
        last_epoch=last_epoch,
    )
| 63 | 1 |
"""simple docstring"""
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Output container for the Flax UNet defined below.
    # NOTE(review): an automated rename collapsed this field to
    # `_lowerCAmelCase = 42` — upstream it is presumably an annotated array
    # field (e.g. `sample: jnp.ndarray`); verify against the diffusers source.
    _lowerCAmelCase = 42
@flax_register_to_config
class SCREAMING_SNAKE_CASE__ ( nn.Module , FlaxModelMixin , ConfigMixin ):
    r"""
    Conditional 2D UNet in Flax: a conv stem, a stack of (cross-attention)
    down blocks, a cross-attention mid block, and mirrored up blocks, all
    conditioned on a timestep embedding and encoder hidden states.

    NOTE(review): the source was corrupted by an identifier rewrite — all 17
    config fields were named ``_lowerCAmelCase`` (each clobbering the last),
    both methods were named ``lowerCAmelCase__`` (the second shadowed the
    first), ``__call__`` had duplicate ``_lowercase`` parameters (a
    SyntaxError), the base placeholders ``__snake_case`` were undefined, and
    ``jnp.floataa`` / ``jnp.intaa`` do not exist. Names are restored from the
    ``self.*`` references in the method bodies and from the otherwise-unused
    imports (``FlaxModelMixin``, ``ConfigMixin``) at the top of the module.
    """

    sample_size: int = 32
    in_channels: int = 4
    out_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    up_block_types: Tuple[str, ...] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    use_memory_efficient_attention: bool = False

    def init_weights(self, rng):
        """Initialise the module parameters with dummy-shaped inputs."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"]

    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        if self.num_attention_heads is not None:
            raise ValueError(
                """At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )

        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )

        # down
        down_blocks = []
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )

            down_blocks.append(down_block )
        self.down_blocks = down_blocks

        # mid
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )

        # up
        up_blocks = []
        reversed_block_out_channels = list(reversed(block_out_channels ) )
        reversed_num_attention_heads = list(reversed(num_attention_heads ) )
        only_cross_attention = list(reversed(only_cross_attention ) )
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(self.up_block_types ):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1 , len(block_out_channels ) - 1 )]

            is_final_block = i == len(block_out_channels ) - 1

            if up_block_type == "CrossAttnUpBlock2D":
                up_block = FlaxCrossAttnUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            else:
                up_block = FlaxUpBlockaD(
                    in_channels=input_channel , out_channels=output_channel , prev_output_channel=prev_output_channel , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )

            up_blocks.append(up_block )
            prev_output_channel = output_channel
        self.up_blocks = up_blocks

        # out
        self.conv_norm_out = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
        self.conv_out = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__(self , sample , timesteps , encoder_hidden_states , down_block_additional_residuals=None , mid_block_additional_residual=None , return_dict = True , train = False , ):
        """Run the UNet; returns a (sample,) tuple when `return_dict` is False."""
        # 1. time — accept python scalars and 0-d arrays for `timesteps`
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )

        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )

        # 2. pre-process — NCHW -> NHWC for Flax convolutions
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample, res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample, res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples , down_block_additional_residuals ):
                down_block_res_sample += down_block_additional_residual
                new_down_block_res_samples += (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )

        if mid_block_additional_residual is not None:
            sample += mid_block_additional_residual

        # 5. up
        for up_block in self.up_blocks:
            res_samples = down_block_res_samples[-(self.layers_per_block + 1) :]
            down_block_res_samples = down_block_res_samples[: -(self.layers_per_block + 1)]
            if isinstance(up_block , FlaxCrossAttnUpBlockaD ):
                sample = up_block(
                    sample , temb=t_emb , encoder_hidden_states=encoder_hidden_states , res_hidden_states_tuple=res_samples , deterministic=not train , )
            else:
                sample = up_block(sample , temb=t_emb , res_hidden_states_tuple=res_samples , deterministic=not train )

        # 6. post-process — NHWC -> NCHW on the way out
        sample = self.conv_norm_out(sample )
        sample = nn.silu(sample )
        sample = self.conv_out(sample )
        sample = jnp.transpose(sample , (0, 3, 1, 2) )

        if not return_dict:
            return (sample,)

        # NOTE(review): `FlaxUNetaDConditionOutput` is not defined under that
        # name in this file (the output dataclass above was renamed); the call
        # is kept as written in the source.
        return FlaxUNetaDConditionOutput(sample=sample )
| 63 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __magic_name__(config_path, display=False):
    """Load an OmegaConf YAML config from `config_path`.

    When `display` is true, pretty-print the resolved config as YAML.
    NOTE(review): the source had two parameters both named ``_lowerCamelCase``
    (a SyntaxError) and referenced the undefined names ``display``/``config``;
    coherent names are restored from the body.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def __magic_name__(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a YAML config and optional checkpoint on `device`.

    NOTE(review): the source had three parameters all named
    ``_lowerCamelCase`` (a SyntaxError) and referenced the undefined names
    ``conf_path``/``ckpt_path``/``sd``/``model``; names restored from the body.
    The first positional is taken to be the torch device (it is passed to
    ``torch.load(map_location=...)`` and ``model.to(...)``).
    """
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    # NOTE(review): `load_config` is not defined under that name in this file
    # (its definition was renamed); the call is kept as upstream wrote it.
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    # strict flag was obfuscated in the source — TODO confirm against upstream.
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def __magic_name__(x, model):
    """Round-trip `x` through the VQGAN encoder/decoder and return the reconstruction.

    NOTE(review): duplicate ``_lowerCamelCase`` parameters (a SyntaxError) and
    the undefined name ``model`` are repaired from the body.
    """
    z, _, _ = model.encode(x)
    print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
    xrec = model.decode(z)
    return xrec
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=False ):
__a , __a : Optional[Any] = string.rsplit(""".""" , 1 )
if reload:
__a : Optional[Any] = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def __magic_name__ ( _lowerCamelCase : Any ):
if "target" not in config:
raise KeyError("""Expected key `target` to instantiate.""" )
return get_obj_from_str(config["""target"""] )(**config.get("""params""" , {} ) )
def __magic_name__(config, sd, gpu=True, eval_mode=True):
    """Build a model from `config`, load state dict `sd`, optionally CUDA/eval.

    Returns ``{"model": model}``.
    NOTE(review): duplicate ``_lowerCamelCase`` parameters (a SyntaxError) and
    the undefined names ``sd``/``gpu``/``eval_mode``/``model`` are repaired.
    """
    # NOTE(review): `instantiate_from_config` is not defined under that name
    # in this file (its definition was renamed); the call is kept as written.
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def __magic_name__(config, ckpt, gpu, eval_mode):
    """Load a checkpoint (if given) and build the model.

    Returns ``(model, global_step)``; `global_step` is None when no
    checkpoint path is supplied.
    NOTE(review): duplicate ``_lowerCamelCase`` parameters (a SyntaxError) and
    the undefined names ``ckpt``/``pl_sd``/``global_step`` are repaired.
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="""cpu""" )
        global_step = pl_sd["""global_step"""]
        print(F'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    # NOTE(review): `load_model_from_config` is not defined under that name in
    # this file (its definition was renamed); the call is kept as written.
    model = load_model_from_config(config.model , pl_sd["""state_dict"""] , gpu=gpu , eval_mode=eval_mode )["""model"""]
    return model, global_step
| 63 | 1 |
"""simple docstring"""
from typing import Any
def __magic_name__ ( _lowerCamelCase : list , _lowerCamelCase : list , _lowerCamelCase : dict , _lowerCamelCase : dict , _lowerCamelCase : dict , ):
_validation(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
# Creates data structures and fill initial step
__a : dict = {}
__a : dict = {}
for state in states_space:
__a : List[str] = observations_space[0]
__a : Tuple = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
__a : Optional[Any] = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(_lowerCamelCase ) ):
__a : List[str] = observations_space[o]
__a : str = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
__a : List[Any] = """"""
__a : Dict = -1
for k_state in states_space:
__a : Dict = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
__a : Optional[Any] = probability
__a : Tuple = k_state
# Update probabilities and pointers dicts
__a : Dict = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
__a : Tuple = arg_max
# The final observation
__a : Optional[int] = observations_space[len(_lowerCamelCase ) - 1]
# argmax for given final observation
__a : List[Any] = """"""
__a : Tuple = -1
for k_state in states_space:
__a : Optional[int] = probabilities[(k_state, final_observation)]
if probability > max_probability:
__a : Any = probability
__a : Dict = k_state
__a : Tuple = arg_max
# Process pointers backwards
__a : Optional[Any] = last_state
__a : List[str] = []
for o in range(len(_lowerCamelCase ) - 1 , -1 , -1 ):
result.append(_lowerCamelCase )
__a : Optional[int] = pointers[previous, observations_space[o]]
result.reverse()
return result
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , ):
_validate_not_empty(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
_validate_lists(_lowerCamelCase , _lowerCamelCase )
_validate_dicts(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("""There's an empty parameter""" )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any ):
_validate_list(_lowerCamelCase , """observations_space""" )
_validate_list(_lowerCamelCase , """states_space""" )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : str ):
if not isinstance(_object , _lowerCamelCase ):
__a : Optional[int] = F'''{var_name} must be a list'''
raise ValueError(_lowerCamelCase )
else:
for x in _object:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
__a : str = F'''{var_name} must be a list of strings'''
raise ValueError(_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Any , ):
_validate_dict(_lowerCamelCase , """initial_probabilities""" , _lowerCamelCase )
_validate_nested_dict(_lowerCamelCase , """transition_probabilities""" )
_validate_nested_dict(_lowerCamelCase , """emission_probabilities""" )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : str ):
_validate_dict(_object , _lowerCamelCase , _lowerCamelCase )
for x in _object.values():
_validate_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : str , _lowerCamelCase : type , _lowerCamelCase : bool = False ):
if not isinstance(_object , _lowerCamelCase ):
__a : Dict = F'''{var_name} must be a dict'''
raise ValueError(_lowerCamelCase )
if not all(isinstance(_lowerCamelCase , _lowerCamelCase ) for x in _object ):
__a : Optional[int] = F'''{var_name} all keys must be strings'''
raise ValueError(_lowerCamelCase )
if not all(isinstance(_lowerCamelCase , _lowerCamelCase ) for x in _object.values() ):
__a : Optional[Any] = """nested dictionary """ if nested else """"""
__a : Tuple = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(_lowerCamelCase )
# Run the module's doctests when executed directly as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
# NOTE(review): the source rebound a single `lowercase__` variable (losing the
# earlier dict entries) while the final `_LazyModule(...)` call referenced an
# undefined `_import_structure`, and the lazy module was never installed in
# `sys.modules`. The standard Hugging Face pattern is restored.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
lowercase__ = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL.
# NOTE(review): this rebinds `lowercase__`, clobbering the logger above —
# upstream these were presumably two differently named globals; confirm
# before relying on either.
lowercase__ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration class for the OpenAI GPT model ("openai-gpt").

    NOTE(review): in the source all 16 ``__init__`` parameters were named
    ``_lowercase`` (duplicate argument names are a SyntaxError), both class
    attributes were named ``_lowerCAmelCase`` (the second clobbered the
    first), and the base placeholder ``__snake_case`` was undefined.
    Parameter names and the ``PretrainedConfig`` base are restored from the
    attribute assignments in the body and the import at the top of the
    module.
    """

    # `model_type` / `attribute_map` are the names `PretrainedConfig` reads.
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store the hyper-parameters and forward extra kwargs to the base."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 63 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
lowercase__ = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL.
# NOTE(review): this rebinds `lowercase__`, clobbering the logger above —
# upstream these were presumably two differently named globals; confirm
# before relying on either.
lowercase__ = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration class for UniSpeech models.

    NOTE(review): in the source every ``__init__`` parameter was named
    ``_lowercase`` (duplicate argument names are a SyntaxError) and the base
    placeholder ``__snake_case`` was undefined. Parameter names and the
    ``PretrainedConfig`` base are restored from the attribute-assignment
    order in the body and the import at the top of the module.
    """

    _lowerCAmelCase = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        """Store the hyper-parameters; validate the conv-layer descriptions."""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def lowerCAmelCase__(self):
        # Overall downsampling factor of the convolutional feature extractor
        # (upstream this property is `inputs_to_logits_ratio`).
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 63 | 1 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class SCREAMING_SNAKE_CASE__ ( yaml.SafeLoader ):
    """A SafeLoader that rejects mappings containing duplicate keys.

    NOTE(review): in the source both methods were named ``lowerCAmelCase__``
    (the second shadowed the first), while the bodies still called
    ``self._check_no_duplicates_on_constructed_node`` and
    ``super().construct_mapping`` — those names are restored so the override
    is actually picked up by PyYAML.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        """Raise TypeError if `node` (a mapping node) repeats a key."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; use their tuple form so they can be counted.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )

    def construct_mapping(self, node, deep=False):
        """Standard mapping construction plus the duplicate-key check."""
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def __magic_name__ ( _lowerCamelCase : str ):
__a : Optional[Any] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
__a : Union[str, Any] = full_content[1:].index("""---""" ) + 1
__a : int = """\n""".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_lowerCamelCase )
class SCREAMING_SNAKE_CASE__ ( dict ):
    """README YAML metadata as a dict, with (de)serialisation helpers.

    NOTE(review): the base placeholder ``__snake_case`` was undefined; ``dict``
    is restored (the class iterates ``self.items()`` and is constructed with
    ``cls(**metadata_dict)``). Method/attribute names are restored from the
    in-class references (``cls.from_yaml_string``, ``self._to_readme``,
    ``self.to_yaml_string``, ``cls._FIELDS_WITH_DASHES``) and the
    ``DatasetMetadata.from_readme``/``to_readme`` calls in the CLI block
    below.
    """

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path):
        """Parse the leading YAML block of the README at `path`."""
        with open(path, encoding="""utf-8""" ) as readme_file:
            # NOTE(review): `_split_yaml_from_readme` is not defined under
            # that name in this file (its definition was renamed); the call
            # is kept as upstream wrote it.
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write/refresh the YAML metadata block of the README at `path`."""
        if path.exists():
            with open(path, encoding="""utf-8""" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, """w""" , encoding="""utf-8""" ) as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content = None):
        # Re-emit the README with this metadata as the leading `---` block.
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = """---\n""" + self.to_yaml_string() + """---\n""" + content
        else:
            full_content = """---\n""" + self.to_yaml_string() + """---\n"""
        return full_content

    @classmethod
    def from_yaml_string(cls, string):
        """Build an instance from a YAML string, rejecting duplicate keys."""
        # NOTE(review): `_NoDuplicateSafeLoader` is not defined under that
        # name in this file (the loader class above was renamed); kept as-is.
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self):
        """Serialise back to YAML, restoring dashed key names."""
        return yaml.safe_dump(
            {
                (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding="""utf-8""" , ).decode("""utf-8""" )
# Known task tags mapped to their (empty) lists of sub-task ids; used when
# validating the `task_ids` metadata field.
# NOTE(review): presumably named `known_task_ids` upstream — here it rebinds
# the generic `lowercase__` global like the other module constants.
lowercase__ = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
# CLI: validate (and round-trip) the YAML metadata block of a README.md.
# NOTE(review): the source bound the parser to `lowercase__` but then used the
# undefined names `ap`, `args`, `readme_filepath` and `dataset_metadata`;
# coherent names are restored from those references.
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    # NOTE(review): `DatasetMetadata` is not defined under that name in this
    # file (the class above was renamed); the calls are kept as written.
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 63 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Helper that builds configs and inputs for the TF EfficientFormer tests.

    NOTE(review): in the source every ``__init__``/check-method parameter was
    named ``_lowercase`` (duplicate argument names are a SyntaxError) and all
    five methods were named ``lowerCAmelCase__`` (each shadowing the last).
    Distinct names are restored from the attribute assignments and the call
    sites (``self.get_config()``, ``self.prepare_config_and_inputs()``, and
    ``self.model_tester.prepare_config_and_inputs_for_common()`` in the test
    class below).
    """

    def __init__(
        self,
        parent,
        batch_size: int = 13,
        image_size: int = 64,
        patch_size: int = 2,
        embed_dim: int = 3,
        num_channels: int = 3,
        is_training: bool = True,
        use_labels: bool = True,
        hidden_size: int = 128,
        hidden_sizes=[16, 32, 64, 128],
        num_hidden_layers: int = 7,
        num_attention_heads: int = 4,
        intermediate_size: int = 37,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        type_sequence_label_size: int = 10,
        initializer_range: float = 0.02,
        encoder_stride: int = 2,
        num_attention_outputs: int = 1,
        dim: int = 128,
        depths: List[int] = [2, 2, 2, 2],
        resolution: int = 2,
        mlp_expansion_ratio: int = 2,
    ):
        # NOTE(review): the two `3` defaults (embed_dim / num_channels) and
        # the two `128` defaults (hidden_size / dim) are interchangeable in
        # the corrupted signature; either pairing yields identical values.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        self.num_attention_outputs = num_attention_outputs
        self.embed_dim = embed_dim
        self.seq_length = embed_dim + 1
        self.resolution = resolution
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.dim = dim
        self.mlp_expansion_ratio = mlp_expansion_ratio

    def prepare_config_and_inputs(self):
        """Build random pixel values, optional labels, and a config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Return an EfficientFormerConfig mirroring this tester's settings."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward a base model and check the hidden-state shape."""
        model = TFEfficientFormerModel(config=config)
        result = model(pixel_values , training=False)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Forward a classifier (RGB and greyscale) and check logit shapes."""
        config.num_labels = self.type_sequence_label_size
        model = TFEfficientFormerForImageClassification(config)
        result = model(pixel_values , labels=labels , training=False)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        config.num_channels = 1
        model = TFEfficientFormerForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
    """Common model/pipeline test-suite for TF EfficientFormer.

    NOTE(review): this block is machine-obfuscated and broken as-is:
    - every assignment is bound to a throwaway ``__a`` while later lines read
      the intended names (``model``, ``outputs``, ``inputs_dict``, ...), so the
      tests raise NameError at runtime;
    - all class attributes share the name ``_lowerCAmelCase`` (each rebinding
      shadows the previous — the mixins expect ``all_model_classes``,
      ``pipeline_model_mapping`` and the ``test_*`` flags);
    - all test methods share the name ``lowerCAmelCase__``, so unittest never
      collects them (no ``test_`` prefix) and each def shadows the previous;
    - two nested/overridden defs repeat the parameter name ``_lowercase``,
      which is a SyntaxError (duplicate argument).
    Reconstructing the original names requires the upstream file; flagged
    rather than guessed here.
    """

    # NOTE(review): these repeated bindings shadow each other; they were
    # presumably distinct mixin attributes before obfuscation — TODO confirm.
    _lowerCAmelCase = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    _lowerCAmelCase = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # NOTE(review): the tester/config-tester are bound to locals, so
        # ``self.model_tester`` read below is never set; ``_lowercase`` here is
        # also undefined (was presumably the config class and a False flag).
        __a : Tuple = TFEfficientFormerModelTester(self )
        __a : Any = ConfigTester(
            self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        pass
    @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        pass
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Checks that the first argument of the model's call signature is
        # ``pixel_values`` for every model class.
        __a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = model_class(_lowercase )
            __a : Optional[Any] = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a : Optional[Any] = [*signature.parameters.keys()]
            __a : Union[str, Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowercase )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # NOTE(review): the nested def repeats the parameter name
        # ``_lowercase`` three times — duplicate argument names are a
        # SyntaxError in Python; the original parameters were presumably
        # (inputs_dict, config, model_class) — TODO confirm upstream.
        def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
            __a : Tuple = model_class(_lowercase )
            __a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __a : str = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(_lowercase ) , _lowercase )
            if hasattr(self.model_tester , """encoder_seq_length""" ):
                __a : Any = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    __a : int = seq_length * self.model_tester.chunk_length
            else:
                __a : Any = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                __a : Optional[int] = outputs.decoder_hidden_states
                # NOTE(review): typo — should be ``self.assertIsInstance``;
                # ``asseretIsInstance`` raises AttributeError if reached.
                self.asseretIsInstance(_lowercase , (list, tuple) )
                self.assertEqual(len(_lowercase ) , _lowercase )
                __a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
                __a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a : int = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
        '''simple docstring'''
        # NOTE(review): duplicate ``_lowercase`` parameters (SyntaxError); this
        # was presumably the ``_prepare_for_class(inputs_dict, model_class,
        # return_labels=False)`` override — the teacher model takes no labels.
        __a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowercase )
    @unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowercase )
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Attention-output shape checks for every model class, with and
        # without ``output_attentions`` set through the config.
        __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __a : int = True
        __a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
        __a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            __a : List[str] = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            __a : List[Any] = True
            __a : Tuple = False
            __a : List[Any] = True
            __a : int = model_class(_lowercase )
            __a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __a : Optional[Any] = True
            __a : List[str] = model_class(_lowercase )
            __a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Builds maximally-general Keras symbolic inputs and runs the model
        # once to catch shape-dependent conditionals.
        __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            __a : Dict = model_class(_lowercase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __a : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __a : Optional[Any] = model(_lowercase )
            self.assertTrue(outputs_dict is not None )
def prepare_img():
    """Load the standard COCO test image used by the integration tests below.

    NOTE(review): renamed from the obfuscated ``__magic_name__`` because the
    integration tests call ``prepare_img()``; the original also bound the
    opened image to a throwaway name and returned the undefined name ``image``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFEfficientFormerModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end tests against the released ``efficientformer-l1-300`` checkpoint.

    NOTE(review): the class/method names were obfuscated (so unittest never
    collected the tests) and every assignment was bound to a throwaway ``__a``
    while later lines read the intended names (NameError). Names below are
    reconstructed from the read sites; checkpoints, inputs and expected
    values are unchanged.
    """

    @cached_property
    def default_image_processor(self):
        # Read by both tests below as ``self.default_image_processor``.
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300"
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds tiny DistilBert configs/inputs and checks each TF head class.

    NOTE(review): reconstructed from the obfuscated original, in which every
    assignment target was renamed to ``__a`` (leaving ``self.batch_size`` etc.
    unset), all methods shared the name ``lowerCAmelCase__`` (each shadowing
    the previous) and several defs repeated the parameter name ``_lowercase``
    (a SyntaxError). The class name and the method/parameter names are taken
    from the read sites: the test class below instantiates
    ``TFDistilBertModelTester(self)`` and calls ``prepare_config_and_inputs``
    and the ``create_and_check_*`` methods, and the return statement of
    ``prepare_config_and_inputs`` names the six inputs.
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        # The model must also accept a positional list of inputs.
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        # Tile each example num_choices times along a new axis.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests for DistilBert.

    NOTE(review): reconstructed from the obfuscated original: the base classes
    were the undefined name ``__snake_case`` (the mixins are imported at the
    top of this file), all test methods shared the name ``lowerCAmelCase__``
    (so unittest never collected them), and ``setUp`` bound the tester and
    config tester to locals instead of ``self.model_tester`` /
    ``self.config_tester``, which every test reads.
    """

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released ``distilbert-base-uncased`` weights.

    NOTE(review): names reconstructed from the obfuscated original, in which
    the hidden-state output, expected shape and expected slice were all bound
    to a throwaway ``__a`` while the assertions read the intended names.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.1926_1885, -0.1373_2955, 0.411_9799],
                    [0.2215_0156, -0.0742_2661, 0.3903_7204],
                    [0.2275_6018, -0.089_6414, 0.370_1467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 63 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution of ``y = loc + scale * x`` for a base distribution of ``x``.

    NOTE(review): reconstructed from the obfuscated original, whose ``__init__``
    repeated the parameter name ``_lowercase`` (a SyntaxError) and bound the
    defaults to a throwaway ``__a`` instead of ``self.scale`` / ``self.loc``,
    which the properties below read. The class name is grounded by the
    ``return AffineTransformed(...)`` read site further down the file.

    Args:
        base_distribution: the distribution being transformed.
        loc: additive shift (defaults to 0.0).
        scale: multiplicative factor (defaults to 1.0).
        event_dim: event dimensionality forwarded to ``AffineTransform``.
    """

    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation (square root of :attr:`variance`)."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Project a hidden vector into one tensor per distribution argument.

    One ``nn.Linear(in_features, dim)`` is created per entry of ``args_dim``;
    the raw projections are then constrained by ``domain_map``.

    NOTE(review): reconstructed from the obfuscated original, whose ``__init__``
    repeated the parameter name ``_lowercase`` and bound the projections to a
    throwaway ``__a`` instead of ``self.proj`` / ``self.domain_map``, which
    ``forward`` reads. Parameter names are grounded by the
    ``ParameterProjection(in_features=..., args_dim=..., domain_map=...)``
    call site further down the file.
    """

    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    """Wrap an arbitrary callable as an ``nn.Module`` so it can live in a model.

    NOTE(review): reconstructed from the obfuscated original, which bound the
    callable to a throwaway ``__a`` instead of ``self.function`` (read by
    ``forward``).
    """

    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    """Base class mapping projected network outputs onto a torch ``Distribution``.

    Subclasses set :attr:`distribution_class` and :attr:`args_dim` and
    implement :meth:`domain_map` to constrain raw projections into the
    distribution's parameter domain.

    NOTE(review): reconstructed from the obfuscated original, in which the
    annotated class attributes were replaced by three ``_lowerCAmelCase = 42``
    bindings and ``__init__`` bound ``dim``/``args_dim`` to a throwaway
    ``__a``. Method names are grounded by the read sites
    (``self._base_distribution``, ``self.event_shape``, ``self.event_dim``,
    ``cls.squareplus``, ``LambdaLayer(self.domain_map)``); the exact set of
    annotated attributes is an assumption — TODO confirm upstream.
    """

    distribution_class: type
    in_support: Callable[..., bool]
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        # Each argument's width scales with the output dimensionality.
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        """Build the (optionally affinely rescaled) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of a single sample event: ``()`` for scalars, ``(dim,)`` otherwise."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to lie in the distribution's support."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return a projection module mapping ``in_features`` to this output's args."""
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args: torch.Tensor):
        """Constrain raw projections to the parameter domain; subclasses implement."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Smooth positivity transform: ``(x + sqrt(x**2 + 4)) / 2``."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """Student-T distribution output head.

    NOTE(review): base class and names reconstructed from the obfuscated
    original (base was the undefined ``__snake_case``; parameters repeated
    ``_lowercase``). ``domain_map`` keeps scale strictly positive and the
    degrees of freedom above 2 so mean and variance exist.
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        # squareplus keeps scale positive; eps guards against underflow to 0.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """Normal (Gaussian) distribution output head.

    NOTE(review): base class and names reconstructed from the obfuscated
    original (base was the undefined ``__snake_case``; parameters repeated
    ``_lowercase``).
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        # squareplus keeps scale positive; eps guards against underflow to 0.
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """Negative-binomial distribution output head (for count data).

    NOTE(review): base class and names reconstructed from the obfuscated
    original, whose ``domain_map``/``distribution`` repeated the parameter
    name ``_lowercase`` (SyntaxError) and unpacked ``distr_args`` into a
    throwaway ``__a`` while the following lines read ``total_count`` and
    ``logits``.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowercase__ = 1.054_571_817e-34 # unit of ℏ : J * s
lowercase__ = 3e8 # unit of c : m * s^-1
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
__a : int = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
2_4_0 * (distance) ** 4
)
return {"force": force}
elif area == 0:
__a : int = (2_4_0 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
__a : Dict = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Fast pipeline tests for ``KandinskyVaaPriorPipeline`` with tiny dummy components.

    NOTE(review): this block is machine-obfuscated and broken as-is:
    - every assignment is bound to a throwaway ``__a`` while later lines read
      the intended names (``tokenizer``, ``model``, ``prior``, ``components``,
      ``generator``, ``inputs``, ``image`` ...), so the tests raise NameError;
    - all properties/methods share the name ``lowerCAmelCase__`` (each def
      shadows the previous), although ``get_dummy_components`` reads
      ``self.dummy_prior`` / ``self.dummy_image_encoder`` /
      ``self.dummy_text_encoder`` / ``self.dummy_tokenizer`` /
      ``self.dummy_image_processor``, and the config builders read
      ``self.text_embedder_hidden_size`` / ``self.time_input_dim``;
    - ``get_dummy_inputs`` repeats the parameter name ``_lowercase``
      (duplicate argument — a SyntaxError);
    - class attributes share the name ``_lowerCAmelCase`` and shadow each
      other (the pipeline mixin expects distinct attributes such as
      ``pipeline_class`` and the params lists).
    The original names for the scalar properties cannot be grounded from the
    visible reads; flagged rather than guessed.
    """

    _lowerCAmelCase = KandinskyVaaPriorPipeline
    _lowerCAmelCase = ["prompt"]
    _lowerCAmelCase = ["prompt", "negative_prompt"]
    _lowerCAmelCase = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    _lowerCAmelCase = False
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # presumably text_embedder_hidden_size — read by the config builders.
        return 32
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # presumably time_input_dim — read by the two properties below.
        return 32
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return self.time_input_dim
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return self.time_input_dim * 4
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        return 100
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # presumably dummy_tokenizer — read by get_dummy_components.
        __a : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_lowercase )
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : Dict = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        __a : Tuple = PriorTransformer(**_lowercase )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __a : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        __a : List[str] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __a : Optional[Any] = CLIPVisionModelWithProjection(_lowercase )
        return model
    @property
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Optional[Any] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_lowercase , do_normalize=_lowercase , do_resize=_lowercase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
        return image_processor
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Union[str, Any] = self.dummy_prior
        __a : int = self.dummy_image_encoder
        __a : Any = self.dummy_text_encoder
        __a : int = self.dummy_tokenizer
        __a : Optional[Any] = self.dummy_image_processor
        __a : List[Any] = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowercase , clip_sample_range=10.0 , )
        __a : List[Any] = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components
    def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
        '''simple docstring'''
        # NOTE(review): duplicate ``_lowercase`` parameters (SyntaxError);
        # presumably get_dummy_inputs(device, seed=0) — TODO confirm upstream.
        if str(_lowercase ).startswith("""mps""" ):
            __a : Dict = torch.manual_seed(_lowercase )
        else:
            __a : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __a : Union[str, Any] = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        # Runs the pipeline on CPU and compares the returned image embeddings
        # against a fixed expected slice, for both dict and tuple returns.
        __a : Union[str, Any] = """cpu"""
        __a : Union[str, Any] = self.get_dummy_components()
        __a : Dict = self.pipeline_class(**_lowercase )
        __a : Tuple = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __a : Optional[int] = pipe(**self.get_dummy_inputs(_lowercase ) )
        __a : str = output.image_embeds
        __a : Any = pipe(
            **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
        __a : List[Any] = image[0, -10:]
        __a : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __a : Optional[Any] = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Any = torch_device == """cpu"""
        __a : Any = True
        __a : Any = False
        self._test_inference_batch_single_identical(
            test_max_difference=_lowercase , relax_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
    @skip_mps
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Optional[int] = torch_device == """cpu"""
        __a : Union[str, Any] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# NOTE(review): the obfuscated original bound every structure to the same
# throwaway name ``lowercase__`` (each rebinding discarding the previous one)
# while the final ``_LazyModule`` call reads ``_import_structure``, and the
# lazy module was never installed into ``sys.modules``. Reconstructed below
# following the standard transformers lazy-import pattern.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    '''Unit tests for the LED tokenizer (slow and fast variants).

    NOTE(review): the base class `__snake_case` is not defined in this file;
    presumably it is the TokenizerTesterMixin imported above -- verify.
    NOTE(review): the three class attributes below are all bound to the same
    name `_lowerCAmelCase`, so only the last assignment survives.  They look
    like they should be `tokenizer_class`, `rust_tokenizer_class` and
    `test_rust_tokenizer`.
    NOTE(review): throughout this class, results are bound to throwaway
    locals (`__a`) and later code reads names (`_lowercase`, `kwargs`,
    `batch`, `targets`, ...) that are never defined -- the test bodies raise
    NameError as written.
    '''
    _lowerCAmelCase = LEDTokenizer
    _lowerCAmelCase = LEDTokenizerFast
    _lowerCAmelCase = True
    def lowerCAmelCase__(self ):
        '''Write a toy BPE vocab/merges pair into the test temp directory.'''
        super().setUp()
        __a : str = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        # NOTE(review): `_lowercase` is undefined in this scope; the zip was
        # presumably over the vocab list above.
        __a : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __a : List[Any] = {"""unk_token""": """<unk>"""}
        # NOTE(review): `self.vocab_file` / `self.merges_file` are read below
        # but never assigned (the join results go to `__a`).
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowercase ) )
    def lowerCAmelCase__(self , **_lowercase ):
        '''Instantiate the slow tokenizer from the temp vocab files.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
    def lowerCAmelCase__(self , **_lowercase ):
        '''Instantiate the fast (Rust-backed) tokenizer from the temp files.'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
    def lowerCAmelCase__(self , _lowercase ):
        '''Return a (input, output) text pair for generic tokenizer tests.'''
        return "lower newer", "lower newer"
    @cached_property
    def lowerCAmelCase__(self ):
        '''Slow LED tokenizer loaded from the allenai/led-base-16384 hub repo.'''
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
    @cached_property
    def lowerCAmelCase__(self ):
        '''Fast LED tokenizer loaded from the allenai/led-base-16384 hub repo.'''
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Batched encoding returns pt tensors with the expected ids/shape.'''
        __a : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __a : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        # NOTE(review): `self.default_tokenizer` / `default_tokenizer_fast`
        # are never defined; the cached properties above lost their names.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            __a : Dict = batch.input_ids.tolist()[0]
            self.assertListEqual(_lowercase , _lowercase )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Encoding without targets yields no labels/decoder masks.'''
        __a : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , _lowercase )
            self.assertIn("""attention_mask""" , _lowercase )
            self.assertNotIn("""labels""" , _lowercase )
            self.assertNotIn("""decoder_attention_mask""" , _lowercase )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Target texts padded to max_length produce width-32 input_ids.'''
        __a : Optional[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Dict = tokenizer(text_target=_lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Over-long inputs are truncated to the model maximum (5122).'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Inputs and targets both start with BOS and end with EOS.'''
        __a : Tuple = ["""A long paragraph for summarization."""]
        __a : Dict = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : int = tokenizer(_lowercase , return_tensors="""pt""" )
            __a : Dict = tokenizer(text_target=_lowercase , return_tensors="""pt""" )
            __a : List[str] = inputs["""input_ids"""]
            __a : List[Any] = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Padding preserves the per-sequence global_attention_mask.'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[Any] = ["""Summary of the text.""", """Another summary."""]
            __a : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __a : Union[str, Any] = tokenizer(_lowercase , padding=_lowercase )
            __a : Tuple = [[0] * len(_lowercase ) for x in encoded_output["""input_ids"""]]
            __a : Union[str, Any] = tokenizer.pad(_lowercase )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , _lowercase )
    def lowerCAmelCase__(self ):
        '''Intentionally skipped inherited test.'''
        pass
    def lowerCAmelCase__(self ):
        '''Slow and fast tokenizers agree on a sentence containing <mask>.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = """A, <mask> AllenNLP sentence."""
                __a : Dict = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                __a : Tuple = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 63 | 1 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
lowercase__ = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
lowercase__ = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa(bytestream):
    """Read one big-endian unsigned 32-bit integer from *bytestream*.

    The MNIST IDX format stores header fields (magic number, counts,
    dimensions) as big-endian uint32 values.

    Args:
        bytestream: A file-like object supporting ``read(4)``.

    Returns:
        The decoded integer (a numpy uint32 scalar).
    """
    # BUG FIX: `numpy.uintaa` does not exist and the function/variable names
    # were garbled; callers in this file invoke `_readaa`.
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract MNIST images from a gzipped IDX file into a 4-D uint8 array.

    Args:
        f: A file object (with a ``name`` attribute) wrapping gzip data.

    Returns:
        A numpy array of shape (num_images, rows, cols, 1), dtype uint8.

    Raises:
        ValueError: If the header magic number is not 2051 (IDX images).
    """
    # BUG FIX: the decorator argument, the local names, `numpy.uinta` and the
    # function name (callers use `_extract_images`) were all garbled.
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
    return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class-index labels to a one-hot matrix.

    Args:
        labels_dense: 1-D numpy array of integer class indices.
        num_classes: Number of columns of the resulting matrix.

    Returns:
        A (num_labels, num_classes) float array with exactly one 1 per row.
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # BUG FIX: the previous version assigned the constant 1 to a throwaway
    # local, leaving the matrix all zeros; set the hot entries instead.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract MNIST labels from a gzipped IDX file.

    Args:
        f: A file object (with a ``name`` attribute) wrapping gzip data.
        one_hot: If True, return a one-hot matrix instead of a 1-D vector.
        num_classes: Number of classes for the one-hot encoding.

    Returns:
        A 1-D uint8 array of labels, or a one-hot matrix when *one_hot*.

    Raises:
        ValueError: If the header magic number is not 2049 (IDX labels).
    """
    # BUG FIX: garbled identifiers restored (`numpy.uinta` -> uint8, callers
    # use `_extract_labels`, decorator argument must be None).
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
    return labels
class SCREAMING_SNAKE_CASE__ :
    '''Container for MNIST images/labels with shuffled mini-batch iteration.

    NOTE(review): `__init__` and the batch method below declare several
    parameters all named `_lowercase` -- a duplicate-argument SyntaxError.
    The original TensorFlow `_DataSet` took (images, labels, fake_data,
    one_hot, dtype, reshape, seed).
    NOTE(review): many results are bound to throwaway locals (`__a`) while
    later lines read never-defined names (`seed`, `seeda`, `dtype`, `perm`,
    `perma`, `start`, `end`, `_lowercase`); `dtypes.floataa` and
    `numpy.floataa` are garbled spellings of the float32 attributes.
    '''
    @deprecated(
        _lowercase , """Please use alternatives such as official/mnist/_DataSet.py"""
        """ from tensorflow/models.""" , )
    def __init__(self , _lowercase , _lowercase , _lowercase=False , _lowercase=False , _lowercase=dtypes.floataa , _lowercase=True , _lowercase=None , ):
        '''Store images/labels, optionally flattening to vectors and
        rescaling pixel values from [0, 255] to [0.0, 1.0].'''
        __a , __a : List[str] = random_seed.get_seed(_lowercase )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seeda )
        __a : Any = dtypes.as_dtype(_lowercase ).base_dtype
        if dtype not in (dtypes.uinta, dtypes.floataa):
            raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
        if fake_data:
            __a : Union[str, Any] = 10000
            __a : Optional[Any] = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), F'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            __a : int = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                __a : List[Any] = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.floataa:
                # Convert from [0, 255] -> [0.0, 1.0].
                __a : Dict = images.astype(numpy.floataa )
                __a : List[Any] = numpy.multiply(_lowercase , 1.0 / 255.0 )
        __a : Dict = images
        __a : Dict = labels
        __a : int = 0
        __a : Optional[int] = 0
    @property
    def lowerCAmelCase__(self ):
        '''Return the stored images array.'''
        return self._images
    @property
    def lowerCAmelCase__(self ):
        '''Return the stored labels array.'''
        return self._labels
    @property
    def lowerCAmelCase__(self ):
        '''Return the number of examples in the data set.'''
        return self._num_examples
    @property
    def lowerCAmelCase__(self ):
        '''Return how many full epochs have been consumed.'''
        return self._epochs_completed
    def lowerCAmelCase__(self , _lowercase , _lowercase=False , _lowercase=True ):
        '''Return the next batch of (images, labels), reshuffling at each
        epoch boundary and stitching the epoch tail to the new epoch head.'''
        if fake_data:
            __a : int = [1] * 784
            __a : Dict = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(_lowercase )],
                [fake_label for _ in range(_lowercase )],
            )
        __a : Optional[Any] = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            __a : Tuple = numpy.arange(self._num_examples )
            numpy.random.shuffle(_lowercase )
            __a : Dict = self.images[perma]
            __a : Optional[int] = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            __a : Union[str, Any] = self._num_examples - start
            __a : Union[str, Any] = self._images[start : self._num_examples]
            __a : int = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                __a : Union[str, Any] = numpy.arange(self._num_examples )
                numpy.random.shuffle(_lowercase )
                __a : Union[str, Any] = self.images[perm]
                __a : int = self.labels[perm]
            # Start next epoch
            __a : List[str] = 0
            __a : Dict = batch_size - rest_num_examples
            __a : Tuple = self._index_in_epoch
            __a : Any = self._images[start:end]
            __a : List[Any] = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            __a : Optional[Any] = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download *filename* from *source_url* into *work_directory* if absent.

    Args:
        filename: Basename for the local file.
        work_directory: Directory to place the file in (created if missing).
        source_url: Full URL to fetch the file from.

    Returns:
        The local path of the (possibly pre-existing) file.
    """
    # BUG FIX: the original declared three parameters all named
    # `_lowerCamelCase` (a SyntaxError) and checked/downloaded the wrong
    # variables; names restored from the call sites and usage.
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
    with gfile.GFile(filepath) as f:
        size = f.size()
    print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.floataa,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed) and load the MNIST train/validation/test splits.

    Args:
        train_dir: Directory where the four gzipped IDX files are cached.
        fake_data: If True, return empty fake data sets (for unit tests).
        one_hot: Whether labels are returned one-hot encoded.
        dtype: Image dtype, uint8 or float32 (rescaled to [0, 1]).
        reshape: Whether images are flattened to rank-2 arrays.
        validation_size: Number of training examples held out for validation.
        seed: Shuffling seed forwarded to each _DataSet.
        source_url: Base URL of the MNIST mirror (defaults applied if empty).

    Returns:
        A `_Datasets(train, validation, test)` named tuple.

    Raises:
        ValueError: If *validation_size* is outside [0, len(train_images)].
    """
    # BUG FIX: the original signature declared every parameter as
    # `_lowerCamelCase` (a duplicate-argument SyntaxError) and discarded
    # every intermediate result; names restored from the body's usage.
    if fake_data:

        def fake():
            # Empty data set that still honours one_hot/dtype/seed.
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            F'''{len(train_images)}. Received: {validation_size}.'''
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 63 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # CLI: convert an original ControlNet checkpoint into diffusers format.
    # NOTE(review): the parser is bound to `lowercase__` but every
    # `add_argument` call below reads `parser`, which is never defined.
    lowercase__ = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def parse_bool(string):
    """Parse the literal strings "True"/"False" into booleans.

    Used as an argparse ``type=`` callback (registered below as
    ``type=parse_bool``), so any other value must raise.

    Args:
        string: The raw command-line token.

    Returns:
        True or False.

    Raises:
        ValueError: If *string* is neither "True" nor "False".
    """
    # BUG FIX: the function was renamed `__magic_name__` while the argparse
    # registration below still references `parse_bool`; name restored.
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
lowercase__ = parser.parse_args()
lowercase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    """Result of a Burrows-Wheeler transform.

    BUG FIX: the class name was garbled (the transform function below
    annotates its result as ``BWTTransformDict``), its base was the
    undefined name ``__snake_case`` instead of ``TypedDict``, and both
    fields had collapsed into ``_lowerCAmelCase = 42``.
    """

    # Last column of the sorted rotation matrix.
    bwt_string: str
    # Row index of the original string within the sorted rotations.
    idx_original_string: int
def all_rotations(s: str) -> list[str]:
    """Return every cyclic rotation of *s* (including *s* itself).

    Raises:
        TypeError: If *s* is not a string.
    """
    # BUG FIX: the garbled version called isinstance(s, s), which raises
    # TypeError for the wrong reason; check against `str` as intended.
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""" )
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Compute the Burrows-Wheeler transform of *s*.

    Returns:
        A dict with ``bwt_string`` (last column of the sorted rotation
        matrix) and ``idx_original_string`` (row index of *s* in it).

    Raises:
        TypeError: If *s* is not a string.
        ValueError: If *s* is empty.
    """
    # BUG FIX: function names restored (the __main__ block calls
    # `bwt_transform` / `all_rotations`) and the isinstance check fixed.
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert a Burrows-Wheeler transform.

    Args:
        bwt_string: The transformed string (last column of the rotation
            matrix).
        idx_original_string: Row index of the original string; anything
            castable to a non-negative int smaller than ``len(bwt_string)``.

    Returns:
        The original string.

    Raises:
        TypeError: If the arguments have the wrong types.
        ValueError: If *bwt_string* is empty or the index is out of range.
    """
    # BUG FIX: the garbled signature declared both parameters as
    # `_lowerCamelCase` (a duplicate-argument SyntaxError) and the body
    # discarded its intermediate results; identifiers restored from usage.
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )
    # Repeatedly prepend the BWT column and re-sort; after len(bwt_string)
    # passes each row holds a full rotation of the original string.
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    # Interactive demo: read a string, print its BWT and its reconstruction.
    # NOTE(review): the three results below are all bound to `lowercase__`,
    # yet the code reads `entry_msg`, `s`, `result` and `original_string`,
    # none of which is defined -- this block raises NameError as written.
    lowercase__ = "Provide a string that I will generate its BWT transform: "
    lowercase__ = input(entry_msg).strip()
    lowercase__ = bwt_transform(s)
    print(
        f'Burrows Wheeler transform for string \'{s}\' results '
        f'in \'{result["bwt_string"]}\''
    )
    lowercase__ = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f'Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '
        f'we get original string \'{original_string}\''
    )
| 63 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    '''Minimal custom diffusion pipeline (one denoising step on noise).

    NOTE(review): `__init__` declares two parameters both named `_lowercase`
    -- a duplicate-argument SyntaxError; they correspond to the `unet` and
    `scheduler` passed to `register_modules`.
    NOTE(review): base class `__snake_case` is undefined here; presumably
    DiffusionPipeline (imported above).
    '''
    def __init__(self , _lowercase , _lowercase ):
        '''Register the UNet and scheduler modules on the pipeline.'''
        super().__init__()
        self.register_modules(unet=_lowercase , scheduler=_lowercase )
    def __call__(self ):
        '''Run one UNet + scheduler step on random noise.

        NOTE(review): intermediate results go to `__a` while the following
        lines read the undefined names `_lowercase`, `scheduler_output` and
        `result`; as written this raises NameError.
        '''
        __a : Dict = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        __a : Optional[Any] = 1
        __a : List[str] = self.unet(_lowercase , _lowercase ).sample
        __a : Union[str, Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
        # scheduler_output - scheduler_output cancels to zero, so the result
        # is effectively torch.ones_like(...) with the scheduler in the graph.
        __a : Optional[int] = scheduler_output - scheduler_output + torch.ones_like(_lowercase )
        return result
| 63 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
lowercase__ = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
lowercase__ = {
"facebook/bart-base": 1024,
"facebook/bart-large": 1024,
"facebook/bart-large-mnli": 1024,
"facebook/bart-large-cnn": 1024,
"facebook/bart-large-xsum": 1024,
"yjernite/bart_eli5": 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return the GPT-2 byte -> unicode-character mapping used by the BPE.

    Every byte value (0-255) is mapped to a printable unicode character so
    that byte-level BPE can work on unicode strings while avoiding
    whitespace/control characters.  Printable latin ranges map to
    themselves; the remaining bytes map, in order, to chr(256), chr(257), ...

    Returns:
        A dict of 256 entries mapping int byte values to 1-char strings.
    """
    # BUG FIX: the garbled version bound every result to `__a` and then read
    # the undefined names `bs`, `cs` and `n`; identifiers restored.
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    Args:
        word: A tuple (or sequence) of symbols; symbols may be multi-char
            strings produced by earlier BPE merges.

    Returns:
        A set of (left_symbol, right_symbol) tuples; empty for length-1 input.
    """
    # BUG FIX: the garbled version bound its locals to `__a` and then read
    # the undefined names `pairs` and `prev_char`; the tokenizer's `bpe`
    # method calls this as `get_pairs`, so the name is restored too.
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    '''Byte-level BPE tokenizer for BART (GPT-2 style vocab/merges).

    NOTE(review): base class `__snake_case` is undefined; presumably the
    PreTrainedTokenizer imported above.
    NOTE(review): `__init__`, `save_vocabulary`,
    `build_inputs_with_special_tokens`, `get_special_tokens_mask`,
    `create_token_type_ids_from_sequences` and the final method each declare
    several parameters all named `_lowercase` -- duplicate-argument
    SyntaxErrors.  Results are also bound to throwaway `__a` locals while
    later lines read never-defined names; this class cannot run as written.
    '''
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = ["input_ids", "attention_mask"]
    def __init__(self , _lowercase , _lowercase , _lowercase="replace" , _lowercase="<s>" , _lowercase="</s>" , _lowercase="</s>" , _lowercase="<s>" , _lowercase="<unk>" , _lowercase="<pad>" , _lowercase="<mask>" , _lowercase=False , **_lowercase , ):
        '''Load the vocab/merges files, build the byte encoder and the BPE
        merge ranks, and wrap the special tokens as AddedToken instances.'''
        __a : Optional[Any] = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
        __a : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
        __a : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
        __a : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
        __a : int = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
        __a : Any = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        __a : str = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
        super().__init__(
            errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
        with open(_lowercase , encoding="""utf-8""" ) as vocab_handle:
            __a : Optional[int] = json.load(_lowercase )
        __a : List[str] = {v: k for k, v in self.encoder.items()}
        __a : Optional[int] = errors  # how to handle errors in decoding
        __a : int = bytes_to_unicode()
        __a : str = {v: k for k, v in self.byte_encoder.items()}
        with open(_lowercase , encoding="""utf-8""" ) as merges_handle:
            __a : int = merges_handle.read().split("""\n""" )[1:-1]
        __a : Dict = [tuple(merge.split() ) for merge in bpe_merges]
        __a : Dict = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Dict = {}
        __a : List[str] = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        __a : Union[str, Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
    @property
    def lowerCAmelCase__(self ):
        '''Size of the base vocabulary (without added tokens).'''
        return len(self.encoder )
    def lowerCAmelCase__(self ):
        '''Return the full vocabulary (base encoder plus added tokens).'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def lowerCAmelCase__(self , _lowercase ):
        '''Apply byte-pair merges to one token string, memoized in self.cache.'''
        if token in self.cache:
            return self.cache[token]
        __a : int = tuple(_lowercase )
        __a : str = get_pairs(_lowercase )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked adjacent pair until none remains.
            __a : Any = min(_lowercase , key=lambda _lowercase : self.bpe_ranks.get(_lowercase , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            __a , __a : Tuple = bigram
            __a : int = []
            __a : Union[str, Any] = 0
            while i < len(_lowercase ):
                try:
                    __a : Union[str, Any] = word.index(_lowercase , _lowercase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    __a : List[str] = j
                if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            __a : Any = tuple(_lowercase )
            __a : List[Any] = new_word
            if len(_lowercase ) == 1:
                break
            else:
                __a : Optional[int] = get_pairs(_lowercase )
        __a : List[Any] = """ """.join(_lowercase )
        __a : Union[str, Any] = word
        return word
    def lowerCAmelCase__(self , _lowercase ):
        '''Split text with the GPT-2 regex and BPE-encode each piece.'''
        __a : str = []
        for token in re.findall(self.pat , _lowercase ):
            __a : Union[str, Any] = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(""" """ ) )
        return bpe_tokens
    def lowerCAmelCase__(self , _lowercase ):
        '''Convert a token string to its vocabulary id (unk if missing).'''
        return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
    def lowerCAmelCase__(self , _lowercase ):
        '''Convert a vocabulary id back to its token string.'''
        return self.decoder.get(_lowercase )
    def lowerCAmelCase__(self , _lowercase ):
        '''Join tokens and undo the byte-to-unicode mapping into text.'''
        __a : Optional[int] = """""".join(_lowercase )
        __a : Any = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text
    def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
        '''Write vocab.json and merges.txt into *save_directory*.'''
        if not os.path.isdir(_lowercase ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __a : Optional[Any] = os.path.join(
            _lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : Union[str, Any] = os.path.join(
            _lowercase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(_lowercase , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + """\n""" )
        __a : List[Any] = 0
        with open(_lowercase , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowercase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        """ Please check that the tokenizer is not corrupted!""" )
                    __a : Tuple = token_index
                writer.write(""" """.join(_lowercase ) + """\n""" )
                index += 1
        return vocab_file, merge_file
    def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
        '''Add BART special tokens: <s> A </s> (and </s> B </s> for pairs).'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __a : Optional[Any] = [self.cls_token_id]
        __a : Tuple = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = False ):
        '''Return a mask with 1 at special-token positions, 0 elsewhere.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowercase , token_ids_a=_lowercase , already_has_special_tokens=_lowercase )
        if token_ids_a is None:
            return [1] + ([0] * len(_lowercase )) + [1]
        return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
    def lowerCAmelCase__(self , _lowercase , _lowercase = None ):
        '''Return all-zero token type ids (BART does not use segment ids).'''
        __a : Union[str, Any] = [self.sep_token_id]
        __a : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def lowerCAmelCase__(self , _lowercase , _lowercase=False , **_lowercase ):
        '''Optionally prepend a space so the first word is BPE-merged as if
        mid-sentence (add_prefix_space / is_split_into_words handling).'''
        __a : Any = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(_lowercase ) > 0 and not text[0].isspace()):
            __a : Optional[int] = """ """ + text
        return (text, kwargs)
| 63 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration class for the ViT-MSN model.

    Defaults mirror the sayakpaul/vit-msn-base architecture referenced in the
    archive map above.
    NOTE(review): the base class `__snake_case` is not defined in this file;
    presumably PretrainedConfig (imported above) -- verify.
    """

    _lowerCAmelCase = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        """Store the model hyper-parameters; extra kwargs go to the base
        config class.

        BUG FIX: the original signature declared every parameter as
        `_lowercase` (a duplicate-argument SyntaxError) and bound each value
        to a throwaway `__a` local, so the config stored nothing; parameter
        names were restored from the body's right-hand sides.
        """
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 1 |
"""simple docstring"""
class Node:
    """A binary-search-tree node holding a comparable value.

    NOTE(review): in the obfuscated source the class was renamed while its own
    body and the functions below still reference ``Node`` / ``insert``; names
    restored, with aliases kept for backward compatibility.
    """

    def __init__(self, val):
        # The obfuscated original assigned throwaway locals `__a` here, so
        # instances carried no state; restored to instance attributes.
        self.val = val
        self.left = None
        self.right = None

    def insert(self, val):
        """Insert *val* into the subtree rooted at this node (duplicates update in place)."""
        # Bug fix: the original tested `if self.val:` which misbehaves for a
        # falsy root value (e.g. 0) by overwriting it; test against None instead.
        if self.val is not None:
            if val < self.val:
                if self.left is None:
                    self.left = Node(val)
                else:
                    self.left.insert(val)
            elif val > self.val:
                if self.right is None:
                    self.right = Node(val)
                else:
                    self.right.insert(val)
        else:
            self.val = val

    # Backward-compatible alias for the original mangled method name.
    lowerCAmelCase__ = insert


# Backward-compatible alias for the original mangled class name.
SCREAMING_SNAKE_CASE__ = Node
def inorder(root, res):
    """Append the values of the BST rooted at *root* to *res* in ascending order.

    NOTE(review): the obfuscated source named this def `__magic_name__` while
    its own recursive calls (and `tree_sort` below) reference `inorder`; the
    working name is restored.

    Args:
        root: a Node (or None) with `.left`, `.val`, `.right`.
        res: list mutated in place; also the accumulator across recursion.
    """
    # Recursive in-order traversal: left subtree, node, right subtree.
    if root:
        inorder(root.left, res)
        res.append(root.val)
        inorder(root.right, res)
def tree_sort(arr):
    """Sort *arr* by inserting every element into a BST and reading it back in order.

    NOTE(review): the obfuscated source named this def `__magic_name__` while the
    `__main__` guard calls `tree_sort`, and bound `root`/`res` to throwaway
    locals; names restored so the calls resolve.

    Returns:
        The input list unchanged when empty, otherwise a new sorted list
        (duplicates collapse because `Node.insert` updates equal values in place).
    """
    # Build BST
    if len(arr) == 0:
        return arr
    root = Node(arr[0])
    for value in arr[1:]:
        root.insert(value)
    # Traverse BST in order.
    res = []
    inorder(root, res)
    return res


if __name__ == "__main__":
    print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 63 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Fast (Rust-backed) tokenizer for the DPR context encoder.

    NOTE(review): the obfuscated source named all five class attributes
    `_lowerCAmelCase`, so only the last survived; the conventional fast-tokenizer
    attribute names are restored — the right-hand sides were already intact.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Fast (Rust-backed) tokenizer for the DPR question encoder.

    NOTE(review): the obfuscated source named all five class attributes
    `_lowerCAmelCase`, so only the last survived; the conventional fast-tokenizer
    attribute names are restored — the right-hand sides were already intact.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
lowercase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowercase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case)
class SCREAMING_SNAKE_CASE__:
    """Mixin adding DPR-reader-specific batch encoding and answer-span decoding.

    NOTE(review): the obfuscated source bound nearly every local to a throwaway
    `__a` while later statements read the original names, and named both public
    methods `lowerCAmelCase__` (the second silently shadowing the first) even
    though the body calls `self._get_best_spans`; working names are restored.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Encode question/title/text triplets as ``[CLS] question [SEP] title [SEP] text``.

        Falls back to the plain tokenizer behavior when titles/texts are absent.
        """
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of the two passage parts was given: treat it as text_pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question may be duplicated across all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f'''There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.'''
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["""input_ids"""]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["""attention_mask"""] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4):
        """Decode the reader's logits into the best answer spans across passages.

        Passages are visited in decreasing relevance order; spans are offset back
        into the full sequence and decoded to text.

        Returns:
            list[DPRSpanPrediction]: at most *num_spans* predictions.
        """
        input_ids = reader_input["""input_ids"""]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        """Return up to *top_spans* non-overlapping (start, end) index pairs with the
        highest combined start+end logit scores within one passage."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        # Original lambda read `x[1]` but declared a mangled parameter name; fixed.
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f'''Wrong span indices: [{start_index}:{end_index}]'''
            length = end_index - start_index + 1
            assert length <= max_answer_length, f'''Span is too long: {length} > {max_answer_length}'''
            # Skip spans overlapping any already-chosen span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__snake_case)
class SCREAMING_SNAKE_CASE__(__snake_case, __snake_case):
    """Fast (Rust-backed) tokenizer for the DPR reader.

    NOTE(review): the obfuscated source named all six class attributes
    `_lowerCAmelCase`, so only the last survived; the conventional fast-tokenizer
    attribute names are restored — the right-hand sides were already intact.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 63 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Configuration for a TimeSformer video transformer model.

    Defaults correspond to the base ``timesformer`` architecture
    (8 frames, 224px images, 16px patches, divided space-time attention).
    """

    # NOTE(review): mangled to `_lowerCAmelCase` in the obfuscated source;
    # `model_type` is the attribute the config machinery actually reads.
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The obfuscated original bound every value to a throwaway local `__a`
        # instead of an instance attribute, leaving the config empty; restored
        # to `self.*` assignments in the original order.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 63 |
"""simple docstring"""
import os
def largest_product(grid):
    """Return the greatest product of four adjacent cells in *grid*
    (vertically, horizontally, or along either diagonal).

    NOTE(review): the obfuscated source bound every intermediate product to a
    throwaway `__a` and then passed the grid itself to `max()` four times;
    the intended variable names are restored.

    Args:
        grid: an n x n list of lists of ints (the diagonal checks assume a
            square grid, as the original comment below states).
    """
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    # Diagonal products persist across iterations when their guard is false —
    # this mirrors the original control flow (stale values can be compared,
    # which is harmless because every candidate is still a valid 4-cell product).
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )
            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product
    return largest
def solution():
    """Read the grid from ``grid.txt`` next to this file and return the
    largest product of four adjacent numbers (Project Euler problem 11).

    NOTE(review): the obfuscated source bound `grid` to a throwaway `__a` and
    mangled the `__file__` argument of `os.path.dirname`; restored.
    """
    grid = []
    with open(os.path.dirname(__file__) + """/grid.txt""") as file:
        for line in file:
            grid.append(line.strip("""\n""").split(""" """))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
| 63 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Test suite for the GPT-SAN Japanese tokenizer.

    NOTE(review): this class comes from an obfuscated dataset dump — every test
    method is named `lowerCAmelCase__` (so at runtime only the last definition
    survives) and many assignments were rewritten to a throwaway local `__a`
    while later statements read the original attribute names (`self.vocab_file`,
    `self.special_tokens_map`, ...). The code is kept byte-identical here; the
    comments describe the apparent intent of each method.
    """

    # Apparent test-mixin configuration: tokenizer class under test and kwargs.
    _lowerCAmelCase = GPTSanJapaneseTokenizer
    _lowerCAmelCase = False
    _lowerCAmelCase = {"do_clean_text": False, "add_prefix_space": False}

    # setUp: writes a tiny vocab file and an emoji-mapping JSON into tmpdirname.
    # NOTE(review): the `__a` bindings here presumably were `self.vocab_file`
    # etc. before obfuscation — the writes below read `self.vocab_file` /
    # `self.emoji_file`, which these locals no longer provide.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        __a : Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        __a : str = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        __a : str = {"""unk_token""": """<unk>"""}
        __a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(_lowercase ) )

    # get_tokenizer: builds a tokenizer from the temp dir with the special tokens map.
    def lowerCAmelCase__(self , **_lowercase ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_lowercase )

    # get_input_output_texts: raw input and its expected normalized decoding.
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        __a : Any = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        __a : Dict = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text

    # get_clean_sequence: encode then decode the sample pair above.
    def lowerCAmelCase__(self , _lowercase ):
        '''simple docstring'''
        __a , __a : Dict = self.get_input_output_texts(_lowercase )
        __a : Union[str, Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
        __a : List[Any] = tokenizer.decode(_lowercase , clean_up_tokenization_spaces=_lowercase )
        return text, ids

    # Three intentionally skipped common-tokenizer tests (placeholders).
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def lowerCAmelCase__(self ):
        '''simple docstring'''
        pass  # TODO add if relevant

    def lowerCAmelCase__(self ):
        '''simple docstring'''
        pass  # TODO add if relevant

    # Full-tokenizer round trip: tokenize, ids without and with special tokens.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Dict = self.get_tokenizer()
        # Testing tokenization
        __a : str = """こんにちは、世界。 こんばんは、㔺界。"""
        __a : List[str] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        __a : int = tokenizer.tokenize(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        # Testing conversion to ids without special tokens
        __a : Optional[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        __a : Tuple = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        # Testing conversion to ids with special tokens
        __a : List[Any] = tokens + [tokenizer.unk_token]
        __a : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        __a : str = tokenizer.convert_tokens_to_ids(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )

    # Token bagging: <|bagoftoken|> expands to repeated preceding tokens on decode.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Tuple = self.get_tokenizer()
        # Testing tokenization
        __a : Any = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        __a : Any = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        __a : Tuple = tokenizer.encode(_lowercase )
        __a : List[Any] = tokenizer.decode(_lowercase )
        self.assertEqual(_lowercase , _lowercase )

    # Prefix handling against the released checkpoint: three equivalent ways of
    # supplying prefix + input must produce identical decodings.
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Any = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __a : Optional[int] = """こんにちは、世界。"""
        __a : Union[str, Any] = """こんばんは、㔺界。😀"""
        __a : List[str] = """こんにちは、世界。こんばんは、世界。😀"""
        __a : Tuple = tokenizer.encode(prefix_text + input_text )
        __a : Optional[int] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        __a : Union[str, Any] = tokenizer.encode(_lowercase , prefix_text=_lowercase )
        __a : str = tokenizer.decode(_lowercase )
        __a : Optional[Any] = tokenizer.decode(_lowercase )
        __a : str = tokenizer.decode(_lowercase )
        self.assertEqual(_lowercase , _lowercase )
        self.assertEqual(_lowercase , _lowercase )
        self.assertEqual(_lowercase , _lowercase )

    # token_type_ids layout for the three prefix/input call styles.
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        __a : Union[str, Any] = """こんにちは、世界。"""
        __a : Optional[int] = """こんばんは、㔺界。😀"""
        __a : Optional[Any] = len(tokenizer.encode(_lowercase ) ) - 2
        __a : List[Any] = len(tokenizer.encode(_lowercase ) ) - 2
        __a : List[str] = [1] + [0] * (len_prefix + len_text + 1)
        __a : Union[str, Any] = [1] * (len_prefix + len_text + 1) + [0]
        __a : str = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        __a : Optional[int] = tokenizer(prefix_text + input_text ).token_type_ids
        __a : Optional[int] = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        __a : Union[str, Any] = tokenizer(_lowercase , prefix_text=_lowercase ).token_type_ids
        self.assertListEqual(_lowercase , _lowercase )
        self.assertListEqual(_lowercase , _lowercase )
        self.assertListEqual(_lowercase , _lowercase )

    # SEG-token placement when a prefix is supplied.
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __a : Dict = tokenizer.encode("""あンいワ""" )
        __a : List[Any] = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        __a : List[Any] = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(_lowercase ) , tokenizer.decode(_lowercase ) )
        self.assertEqual(tokenizer.decode(_lowercase ) , tokenizer.decode(_lowercase ) )
        self.assertNotEqual(_lowercase , _lowercase )
        self.assertNotEqual(_lowercase , _lowercase )
        self.assertEqual(x_token_a[1] , x_token_a[-1] )  # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] )  # SEG token

    # Batch encoding of (prefix, input) pairs with padding: exact ids/masks.
    @slow
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        __a : Union[str, Any] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        __a : Optional[int] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        __a : str = tokenizer(_lowercase , padding=_lowercase )
        __a : Optional[int] = tokenizer.batch_encode_plus(_lowercase , padding=_lowercase )
        # fmt: off
        __a : Union[str, Any] = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        __a : str = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        __a : Any = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , _lowercase )
        self.assertListEqual(x_token.token_type_ids , _lowercase )
        self.assertListEqual(x_token.attention_mask , _lowercase )
        self.assertListEqual(x_token_a.input_ids , _lowercase )
        self.assertListEqual(x_token_a.token_type_ids , _lowercase )
        self.assertListEqual(x_token_a.attention_mask , _lowercase )

    # Two more intentionally skipped common-tokenizer tests.
    def lowerCAmelCase__(self ):
        '''simple docstring'''
        pass

    def lowerCAmelCase__(self ):
        '''simple docstring'''
        pass
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Output of the semantic Stable Diffusion pipeline.

    NOTE(review): the obfuscated source reduced both fields to duplicate
    ``_lowerCAmelCase = 42`` assignments (the second silently overwriting the
    first); field names/annotations restored from the pipeline-output
    convention — confirm against pipeline_semantic_stable_diffusion.py.
    """

    # Generated images, either as PIL images or a numpy array.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags, or None when the safety checker was not run.
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Configuration for the vision tower of a GIT model.

    Defaults correspond to the base ``git_vision_model`` architecture.
    """

    # NOTE(review): mangled to `_lowerCAmelCase` in the obfuscated source;
    # `from_pretrained` below reads `cls.model_type`, so the real name matters.
    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The obfuscated original bound every value to a throwaway local `__a`
        # instead of an instance attribute; restored in the original order.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config, unwrapping the ``vision_config`` sub-dict when the
        checkpoint is a full ``git`` config."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("""model_type""") == "git":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls, """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


# Backward-compatible alias: the sibling GIT config below instantiates the
# vision config via this (its original) name.
GitVisionConfig = SCREAMING_SNAKE_CASE__
class SCREAMING_SNAKE_CASE__(__snake_case):
    """Configuration for a GIT (GenerativeImage2Text) model.

    Wraps a text-decoder configuration plus a nested vision-tower config.
    """

    # NOTE(review): mangled to `_lowerCAmelCase` in the obfuscated source;
    # `to_dict` below reads `self.__class__.model_type`, so the real name matters.
    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the GitVisionConfig with default values.""")
        # The obfuscated original bound every value to a throwaway local `__a`
        # instead of an instance attribute; restored in the original order.
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, inlining the nested vision config and model type."""
        output = copy.deepcopy(self.__dict__)
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 63 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
__a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : List[str] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
__a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
__a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
import torch
__a : Any = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
__a : Optional[int] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
__a : List[str] = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
@slow
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = pipeline("""text-classification""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Optional[int] = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
@slow
@require_tf
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
__a : str = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
__a : Tuple = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
__a : str = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
__a : Dict = TextClassificationPipeline(model=_lowercase , tokenizer=_lowercase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
__a : List[str] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
__a : Union[str, Any] = """HuggingFace is in"""
__a : List[str] = text_classifier(_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
__a : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
__a : Dict = text_classifier(_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}, {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
__a : Dict = text_classifier(_lowercase , top_k=_lowercase )
__a : Dict = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N, [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N] , )
__a : Dict = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
__a : Any = text_classifier(_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
__a : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(_lowercase ):
text_classifier(_lowercase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
__a : Optional[int] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
import math
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : bool , _lowerCamelCase : list[int] , _lowerCamelCase : float ):
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(_lowerCamelCase ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
return min(
minimax(depth + 1 , node_index * 2 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
def __magic_name__ ( ):
    """Demo driver: print the optimal minimax value of a sample game tree.

    NOTE(review): the original printed ``minimax(0, 0, _lowerCamelCase, ...)``
    with undefined argument names; the intended call (maximising player at the
    root, searching the full tree height) is restored.  ``minimax`` is the
    tree-search function defined above (its ``def`` name was mangled).
    """
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): the original called the undefined name ``main()``;
    # invoke the (mangled-name) driver defined above instead.
    __magic_name__()
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for ``knapsack.knapsack``.

    NOTE(review): all three methods share the mangled name ``lowerCAmelCase__``,
    so only the last definition survives on the class and none match unittest's
    ``test_*`` discovery pattern; the original distinct names were lost.  The
    undefined ``_lowercase`` references inside each body are repaired below
    with the locals the assertions actually need (cap, w, val, c).
    """

    def lowerCAmelCase__(self ):
        """Zero capacity must yield zero profit, with or without items."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def lowerCAmelCase__(self ):
        """Small case: capacity 3, weights [3, 2, 1], values [1, 2, 3] -> profit 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def lowerCAmelCase__(self ):
        """Classic 0/1 knapsack example: capacity 50 -> optimal profit 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
    # Run the TestCase above via unittest's CLI entry point.
    unittest.main()
| 63 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def __magic_name__ ( checkpoint_url: str ):
    """Build a ``DPTConfig`` and the expected output shape for a checkpoint URL.

    NOTE(review): the original assigned every config field to a throwaway
    ``__a``, so the returned config was always the default one, and it read the
    undefined ``idalabel``; the intended ``config.<field>`` assignments are
    restored.  Field names for the "large"/"ade" branches follow the standard
    DPT conversion layout — confirm against ``DPTConfig`` if in doubt.
    """
    config = DPTConfig()
    # Sensible default so expected_shape is always bound, even for URLs that
    # match neither branch below (the original could raise UnboundLocalError).
    expected_shape = (1, 384, 384)

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        # Semantic-segmentation variant fine-tuned on ADE20k.
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape


get_dpt_config = __magic_name__  # restore the name used by the conversion driver
def __magic_name__ ( _lowerCamelCase : Tuple ):
__a : Union[str, Any] = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(_lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : str ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__a : str = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
__a : Dict = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
__a : Optional[int] = name.replace("""patch_embed""" , """patch_embeddings""" )
if "pos_embed" in name:
__a : Dict = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
__a : str = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
__a : Optional[int] = name.replace("""proj""" , """projection""" )
if "blocks" in name:
__a : Dict = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
__a : List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__a : Any = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name:
__a : List[str] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__a : Dict = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
__a : str = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
__a : Dict = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
__a : Tuple = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
__a : List[str] = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
__a : Union[str, Any] = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
__a : List[str] = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
__a : List[Any] = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__a : int = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
__a : str = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
__a : str = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
__a : Any = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
__a : str = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
__a : str = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__a : Any = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
__a : Any = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
__a : int = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
__a : List[Any] = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__a : Union[str, Any] = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
__a : Union[str, Any] = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
__a : Optional[int] = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
__a : Union[str, Any] = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
__a : List[Any] = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
__a : List[str] = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
__a : Dict = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
__a : List[str] = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
__a : Optional[Any] = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
__a : str = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
__a : Optional[Any] = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
__a : Tuple = name.replace("""auxlayer""" , """auxiliary_head.head""" )
return name
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Optional[int] ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__a : Dict = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
__a : List[str] = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__a : str = in_proj_weight[: config.hidden_size, :]
__a : Dict = in_proj_bias[: config.hidden_size]
__a : int = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__a : int = in_proj_weight[
-config.hidden_size :, :
]
__a : Tuple = in_proj_bias[-config.hidden_size :]
def __magic_name__ ( ):
    """Download the standard COCO test image used to sanity-check conversions.

    NOTE(review): the original passed the undefined ``_lowerCamelCase`` to
    ``requests.get``; the URL local is restored and ``stream=True`` is required
    so ``.raw`` exposes the response body.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


prepare_img = __magic_name__  # restore the name used by the conversion driver
@torch.no_grad()
def __magic_name__ ( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name ):
    """Convert an original DPT checkpoint into the HF Transformers format.

    Pipeline: download state dict -> drop ignored keys -> rename keys ->
    split fused qkv -> load into the HF model -> verify outputs on a test
    image -> save locally and optionally push to the Hub.

    NOTE(review): the original discarded every intermediate into ``__a``, gave
    all four parameters the same mangled name (a SyntaxError), and called the
    helpers with undefined arguments; the intended dataflow is restored.  The
    helpers (``get_dpt_config`` etc.) are the mangled defs above, re-exposed
    under their original names via aliases.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )


convert_dpt_checkpoint = __magic_name__  # restore the name used by the CLI entry point
if __name__ == "__main__":
    # CLI entry point: parse conversion options and run the converter.
    # NOTE(review): the original bound both the parser and the parsed args to
    # the mangled global ``lowercase__`` while the code below referenced
    # ``parser`` / ``args``; the intended names are restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )

    args = parser.parse_args()
    convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 63 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    def lowerCAmelCase__(self ):
        """Manim scene: animate checkpoint weights moving through CPU slots onto disk.

        NOTE(review): name-mangling broke this body — every result is assigned to
        the throwaway local ``__a`` while later lines read the originally intended
        names (``mem``, ``meta_mem``, ``fill``, ``cpu_left_col_base``,
        ``cpu_right_col_base``, ``model_cpu_arr``, ``ckpt_arr``, ``target`` ...),
        and the manim layout-direction constants (UP/DOWN/LEFT/RIGHT) originally
        passed to ``.arrange(...)`` / ``next_to(...)`` were replaced by the
        undefined ``_lowercase``.  The lost bindings and directions cannot be
        reconstructed with confidence here and must be restored from upstream.
        """
        # Building blocks: full-size memory cell, small "meta" cell, fill swatch.
        __a : List[str] = Rectangle(height=0.5 , width=0.5 )
        __a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
        __a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two 6-cell columns plus a label.
        __a : Dict = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Union[str, Any] = Text("""CPU""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_lowercase )
        # GPU: a single 4-cell row plus a label.
        __a : Optional[Any] = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = Text("""GPU""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(_lowercase )
        # Model: a 6-cell row plus a label.
        __a : List[Any] = [mem.copy() for i in range(6 )]
        __a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Optional[Any] = Text("""Model""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(_lowercase )
        # Small fill targets placed next to/inside the model cells.
        __a : Tuple = []
        __a : Tuple = []
        __a : Optional[int] = []
        for i, rect in enumerate(_lowercase ):
            rect.set_stroke(_lowercase )
            __a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
            self.add(_lowercase )
            model_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase , *_lowercase )
        # "Loaded Checkpoint" block mirroring the model layout.
        __a : Optional[Any] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
        __a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(_lowercase )
        # Checkpoint fill targets, mapped onto the CPU columns.
        __a : Dict = []
        __a : int = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
            target.move_to(_lowercase )
            ckpt_arr.append(_lowercase )
            __a : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase )
        # Legend ("Key") box and captions.
        __a : List[str] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a : List[Any] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_lowercase , _lowercase )
        __a : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_lowercase )
        # Step 1 caption, then the Disk block built from meta cells.
        __a : Optional[int] = MarkupText(
            F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        __a : List[Any] = [meta_mem.copy() for i in range(6 )]
        __a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Dict = Text("""Disk""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
        # Animate each checkpoint cell shrinking onto its disk slot.
        __a : Optional[Any] = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
        self.play(*_lowercase )
        self.play(FadeOut(_lowercase ) )
        # Step 2 caption, then fade everything out.
        __a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_lowercase , run_time=3 ) )
        self.play(
            FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
        self.wait()
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( _lowerCamelCase : list[float] ):
if len(_lowerCamelCase ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
__a : str = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
    # Exercise this module's doctests when run as a script.
    import doctest

    doctest.testmod()
| 63 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float(moles / volume ) * nfactor )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((pressure * volume) / (0.08_21 * moles) ) )
if __name__ == "__main__":
    # Exercise this module's doctests when run as a script.
    import doctest

    doctest.testmod()
| 63 | 1 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Helper that builds EfficientFormer configs and inputs for the TF model tests.

    NOTE(review): mangling damaged this class — ``__init__`` declares every
    parameter with the same name ``_lowercase`` (a SyntaxError) and stores each
    value into the throwaway local ``__a`` instead of attributes on ``self``,
    even though every other method reads ``self.batch_size`` etc.  The original
    distinct parameter names can be inferred from the attribute-read order but
    must be restored from upstream to repair the class.
    """

    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        """Record the tester hyper-parameters (bindings broken — see class NOTE)."""
        # Intended attributes, in order: parent, batch_size, image_size,
        # patch_size, num_channels, is_training, use_labels, hidden_size,
        # num_hidden_layers, num_attention_heads, intermediate_size, hidden_act,
        # dropout probs, label size, initializer_range, encoder_stride,
        # num_attention_outputs, embed_dim, seq_length (= embed_dim + 1),
        # resolution, depths, hidden_sizes, dim, mlp_expansion_ratio.
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio
    def lowerCAmelCase__(self ):
        """Return (config, pixel_values, labels) for a test run.

        NOTE(review): the tensors are bound to ``__a`` while the return reads
        the undefined ``config``/``pixel_values``/``labels``.
        """
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        """Build an ``EfficientFormerConfig`` from the tester attributes."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Run the base model and check the last-hidden-state shape.

        NOTE(review): duplicate parameter names (SyntaxError) and the result is
        bound to ``__a`` while ``result`` is read — both mangling artifacts.
        """
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Run the classification head and check logits, including greyscale input.

        NOTE(review): same mangling artifacts as above (duplicate params,
        discarded bindings read back as ``model``/``result``/``pixel_values``).
        """
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        """Return (config, inputs_dict) for the common test suite.

        NOTE(review): results are bound to ``__a`` while the return reads the
        undefined ``config``/``inputs_dict``/``pixel_values``.
        """
        __a : Any = self.prepare_config_and_inputs()
        __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
    """TF EfficientFormer model-test suite configuration attributes.

    NOTE(review): every class attribute below was renamed by the mangler to the
    same identifier ``_lowerCAmelCase``, so each assignment overwrites the
    previous one and only the final ``False`` survives at class-creation time;
    the originally distinct attribute names (e.g. ``all_model_classes``,
    ``pipeline_model_mapping`` and the boolean test flags) were lost and must
    be restored upstream.
    """

    # Intended: the tuple of model classes under test.
    _lowerCAmelCase = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    # Intended: the pipeline-task -> model-class mapping.
    _lowerCAmelCase = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    # Intended: five boolean feature flags for the common test-suite.
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
    _lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = TFEfficientFormerModelTester(self )
__a : Any = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
    def lowerCAmelCase__(self ):
        """Run the common configuration sanity checks via ``self.config_tester``."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
    def lowerCAmelCase__(self ):
        """Intentionally skipped: the model has no ``inputs_embeds`` input."""
        pass
    @unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
    def lowerCAmelCase__(self ):
        """Intentionally skipped: no input/output embedding accessors to test."""
        pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
    def lowerCAmelCase__(self ):
        """Check hidden-state outputs (count and trailing shape) for every model class.

        NOTE(review): mangling broke this test — results are assigned to the
        throwaway ``__a`` while the assertions read undefined names (``model``,
        ``outputs``, ``hidden_states``, ``seq_length``, ``config``,
        ``inputs_dict``), the inner helper declares three parameters with one
        shared name (a SyntaxError), and ``self.asseretIsInstance`` is a typo
        for ``assertIsInstance``.
        """
        def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
            __a : Tuple = model_class(_lowercase )
            __a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            __a : str = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(_lowercase ) , _lowercase )
            if hasattr(self.model_tester , """encoder_seq_length""" ):
                __a : Any = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
                    __a : int = seq_length * self.model_tester.chunk_length
            else:
                __a : Any = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                __a : Optional[int] = outputs.decoder_hidden_states
                # NOTE(review): typo below — should be self.assertIsInstance.
                self.asseretIsInstance(_lowercase , (list, tuple) )
                self.assertEqual(len(_lowercase ) , _lowercase )
                __a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
                __a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        __a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a : Dict = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a : int = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
    def lowerCAmelCase__(self ):
        """Check the number and shape of the attention tensors a model returns.

        NOTE(review): local names in this method were mangled — every
        assignment target is `__a` (the original targets such as
        `config`, `inputs_dict["output_attentions"]` and `model` were lost)
        and several call sites pass the undefined name `_lowercase`.
        The later lines still read `chunk_length`, `encoder_seq_length`,
        `config`, `inputs_dict` and `attentions`, which are never bound as-is.
        This body must be restored from the upstream source before it can run.
        """
        # Originally: config, inputs_dict = ...; the duplicate `__a , __a`
        # unpacking below discards the first element.
        __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __a : int = True
        # getattr defaults were mangled to `_lowercase` (undefined).
        __a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
        __a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
        __a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
        # LSH-style models hash the sequence `num_hashes` times, multiplying
        # the effective encoder sequence length.
        if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
            __a : List[str] = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            # First pass: request attentions via the inputs dict.
            __a : List[Any] = True
            __a : Tuple = False
            __a : List[Any] = True
            __a : int = model_class(_lowercase )
            __a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __a : Optional[Any] = True
            __a : List[str] = model_class(_lowercase )
            __a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
            __a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
            # Chunked attention adds an extra chunk axis to the shape.
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__a : Dict = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__a : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__a : Optional[Any] = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
    """Load the COCO fixture image used by the slow integration tests.

    Fixes the original body, which bound the image to `__a` but then
    returned the undefined name `image` (NameError).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests for the pretrained EfficientFormer checkpoints.

    Fixes in this class: the original obfuscation named the image-processor
    property and both test methods `lowerCAmelCase__`, so the later
    definitions shadowed the earlier ones and `self.default_image_processor`
    (referenced in the bodies) did not exist; the bodies also passed the
    undefined name `_lowercase` to the image processor and to assertEqual.
    NOTE(review): the restored test-method names follow the transformers
    convention — confirm against the upstream file.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor for the l1-300 checkpoint, or None without vision deps."""
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        """Forward the fixture image and compare logits to recorded values."""
        model = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )

    @slow
    def test_inference_image_classification_head_with_teacher(self):
        """Same check for the distillation (teacher) classification head."""
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""tf""" )
        # forward pass
        outputs = model(**inputs, training=False)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4 ) )
| 63 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
import queue
class SCREAMING_SNAKE_CASE__ :
    """A node of a binary tree: holds a value and left/right child links.

    Fixes the original `__init__`, which stored the undefined name `data`
    into the throwaway local `__a` instead of setting the attributes.
    """

    def __init__(self, data ):
        self.data = data      # payload stored at this node
        self.left = None      # left child (another node) or None
        self.right = None     # right child (another node) or None
def __magic_name__ ( ):
    """Interactively build a binary tree level by level from user input.

    Entering "n" (or nothing) at any prompt stops and returns the tree built
    so far. Fixes the original body: child nodes were never attached
    (`node_found.left/right = ...` was mangled to `__a = ...`) and several
    calls used the undefined name `_lowerCamelCase`. The class in this file
    is named `SCREAMING_SNAKE_CASE__`, so it is used here instead of the
    undefined `TreeNode`.
    """
    print("""\n********Press N to stop entering at any point of time********\n""" )
    check = input("""Enter the value of the root node: """ ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = SCREAMING_SNAKE_CASE__(int(check) )
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = F'''Enter the left node of {node_found.data}: '''
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        left_node = SCREAMING_SNAKE_CASE__(int(check) )
        node_found.left = left_node
        q.put(left_node)
        msg = F'''Enter the right node of {node_found.data}: '''
        check = input(msg).strip().lower() or """n"""
        if check == "n":
            return tree_node
        right_node = SCREAMING_SNAKE_CASE__(int(check) )
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once input stops
def __magic_name__ ( node: TreeNode ):
    """Print a pre-order traversal (root, left, right), comma-separated.

    Fixes the original guard `isinstance(_lowerCamelCase, _lowerCamelCase)`,
    which tested the node against itself (a TypeError for any real node);
    the node class in this file is `SCREAMING_SNAKE_CASE__`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__ ) or not node:
        return
    print(node.data , end=""",""" )
    pre_order(node.left )
    pre_order(node.right )
def __magic_name__ ( node: TreeNode ):
    """Print an in-order traversal (left, root, right), comma-separated.

    Fixes the original guard, which called
    `isinstance(_lowerCamelCase, _lowerCamelCase)` (node tested against
    itself); the node class in this file is `SCREAMING_SNAKE_CASE__`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__ ) or not node:
        return
    in_order(node.left )
    print(node.data , end=""",""" )
    in_order(node.right )
def __magic_name__ ( node: TreeNode ):
    """Print a post-order traversal (left, right, root), comma-separated.

    Fixes the original guard, which called
    `isinstance(_lowerCamelCase, _lowerCamelCase)` (node tested against
    itself); the node class in this file is `SCREAMING_SNAKE_CASE__`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__ ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=""",""" )
def __magic_name__ ( node: TreeNode ):
    """Print a breadth-first (level-order) traversal, comma-separated.

    Fixes the original guard (`isinstance(x, x)`) and the initial
    `q.put(_lowerCamelCase)`, which referenced an undefined name; the node
    class in this file is `SCREAMING_SNAKE_CASE__`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__ ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=""",""" )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def __magic_name__ ( node: TreeNode ):
    """Print a level-order traversal with one printed line per tree level.

    Children of the current level are buffered in `list_` and re-queued once
    the level is exhausted, so each `print()` marks a level boundary. Fixes
    the original guard (`isinstance(x, x)`) and the undefined names in
    `q.put(...)`; the node class in this file is `SCREAMING_SNAKE_CASE__`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__ ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=""",""" )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node)
def __magic_name__ ( node: TreeNode ):
    """Print a pre-order traversal iteratively using an explicit stack.

    Fixes the original guard (`isinstance(x, x)`), the undefined
    `stack.append(_lowerCamelCase)` and the lost rebindings of `n`; the node
    class in this file is `SCREAMING_SNAKE_CASE__`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__ ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=""",""" )
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def __magic_name__ ( node: TreeNode ):
    """Print an in-order traversal iteratively using an explicit stack.

    Fixes the original guard (`isinstance(x, x)`), the undefined
    `stack.append(_lowerCamelCase)` and the lost rebindings of `n`; the node
    class in this file is `SCREAMING_SNAKE_CASE__`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__ ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # descend to the leftmost unvisited node
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data , end=""",""" )
        n = n.right
def __magic_name__ ( node: TreeNode ):
    """Print a post-order traversal iteratively using two stacks.

    The first stack produces the reversed post-order into the second stack,
    which is then popped to print the actual order. Fixes the original
    guard (`isinstance(x, x)`), the duplicate-target unpack of the two
    stacks and the undefined `stacka.append(_lowerCamelCase)`; the node
    class in this file is `SCREAMING_SNAKE_CASE__`.
    """
    if not isinstance(node, SCREAMING_SNAKE_CASE__ ) or not node:
        return
    stacka, stacka_out = [], []
    n = node
    stacka.append(n)
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stacka_out.append(n)
    while stacka_out:  # pop up from stack2 will be the post order
        print(stacka_out.pop().data , end=""",""" )
def __magic_name__ ( _lowerCamelCase : str = "" , _lowerCamelCase : List[str]=5_0 , _lowerCamelCase : Dict="*" ):
if not s:
return "\n" + width * char
__a , __a : Tuple = divmod(width - len(_lowerCamelCase ) - 2 , 2 )
return F'''{left * char} {s} {(left + extra) * char}'''
if __name__ == "__main__":
    # Demo driver: build a tree interactively, then run every traversal.
    # NOTE(review): the names used below (`prompt`, `build_tree`, `pre_order`,
    # `in_order`, ..., `node`) are not defined in this file as-is — all the
    # functions above were renamed to `__magic_name__` and the tree was
    # assigned to `lowercase__`, so this block raises NameError until the
    # original identifiers are restored.
    import doctest

    doctest.testmod()
    print(prompt("Binary Tree Traversals"))
    lowercase__ = build_tree()
    print(prompt("Pre Order Traversal"))
    pre_order(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal"))
    in_order(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal"))
    post_order(node)
    print(prompt() + "\n")
    print(prompt("Level Order Traversal"))
    level_order(node)
    print(prompt() + "\n")
    print(prompt("Actual Level Order Traversal"))
    level_order_actual(node)
    print("*" * 50 + "\n")
    print(prompt("Pre Order Traversal - Iteration Version"))
    pre_order_iter(node)
    print(prompt() + "\n")
    print(prompt("In Order Traversal - Iteration Version"))
    in_order_iter(node)
    print(prompt() + "\n")
    print(prompt("Post Order Traversal - Iteration Version"))
    post_order_iter(node)
    print(prompt())
| 63 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def __magic_name__ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : float ):
# For applying gaussian function for each element in matrix.
__a : int = math.sqrt(_lowerCamelCase )
__a : Any = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def __magic_name__ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int ):
__a : Any = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def __magic_name__ ( kernel_size: int, spatial_variance: float ) -> np.ndarray:
    """Build a spatial Gaussian kernel of the given side length.

    Each entry holds the Euclidean distance from the kernel center, which is
    then mapped through `vec_gaussian` with *spatial_variance*. Fixes the
    original signature, whose two parameters were both named
    `_lowerCamelCase` (a SyntaxError). NOTE(review): `vec_gaussian` is the
    original name of the helper defined above (renamed `__magic_name__` by
    obfuscation in this file).
    """
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size):
        for j in range(0 , kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr, spatial_variance)
def __magic_name__ ( img: np.ndarray, spatial_variance: float, intensity_variance: float, kernel_size: int, ) -> np.ndarray:
    """Apply a bilateral filter (edge-preserving smoothing) to *img*.

    For each interior pixel, weights combine a spatial Gaussian (precomputed
    kernel) with an intensity Gaussian of the local differences, and the
    pixel becomes the weighted mean of its window. Fixes the original
    code: the duplicate parameter names (SyntaxError) and the destroyed
    `size_x, size_y = img.shape` unpack that left both names undefined.
    NOTE(review): `get_gauss_kernel`, `get_slice` and `vec_gaussian` are the
    original names of the helpers above (renamed by obfuscation).
    """
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the window's center pixel.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga
def __magic_name__ ( _lowerCamelCase : list ):
__a : Optional[Any] = args[1] if args[1:] else """../image_data/lena.jpg"""
__a : Union[str, Any] = float(args[2] ) if args[2:] else 1.0
__a : Optional[int] = float(args[3] ) if args[3:] else 1.0
if args[4:]:
__a : Any = int(args[4] )
__a : Any = kernel_size + abs(kernel_size % 2 - 1 )
else:
__a : Optional[int] = 5
return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # Script entry point: read an image, bilateral-filter it, display both.
    # NOTE(review): the assignment targets were mangled to `lowercase__`, so
    # `filename`, `img`, `out` (and `parse_args`, `bilateral_filter`) below
    # are undefined as-is; `cva` appears to be a mangled `cv2` alias and
    # `np.uinta` a mangled `np.uint8` — restore before running.
    lowercase__ , lowercase__ , lowercase__ , lowercase__ = parse_args(sys.argv)
    lowercase__ = cva.imread(filename, 0)
    cva.imshow("input image", img)
    # Normalize to [0, 1] floats for filtering, then rescale back to 8-bit.
    lowercase__ = img / 255
    lowercase__ = out.astype("float32")
    lowercase__ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    lowercase__ = out * 255
    lowercase__ = np.uinta(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 1 |
"""simple docstring"""
from torch import nn
def __magic_name__ ( _lowerCamelCase : Union[str, Any] ):
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F'''Unsupported activation function: {act_fn}''' )
| 63 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __magic_name__ ( ):
    """Build a tiny in-memory Dataset fixture with two near-duplicate docs.

    The first two rows are long runs of "a " (near-duplicates for the MinHash
    tests); the third is distinct. Fixes the original body, which passed the
    undefined name `_lowerCamelCase` to `Dataset.from_dict`.
    """
    data_dict = {
        """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
        """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
        """content""": ["""a """ * 2_0, """a """ * 3_0, """b """ * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Tests for MinHash-based near-duplicate detection and deduplication.

    Fixes in this class: the originals passed the undefined name `_lowercase`
    to `make_duplicate_clusters` / `deduplicate_dataset` / `len` / the final
    `assertEqual`, and collapsed the two-value unpack of
    `deduplicate_dataset`. NOTE(review): both methods are still named
    `lowerCAmelCase__` (the second shadows the first) and `get_dataset`
    refers to the fixture above, renamed `__magic_name__` by obfuscation —
    restore the original names to make these tests discoverable and runnable.
    """

    def lowerCAmelCase__(self ):
        """The two near-identical documents form one cluster of size two."""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def lowerCAmelCase__(self ):
        """Deduplication keeps two rows and flags the surviving extreme."""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter) , 2 )
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
        self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , True )
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int = 1_0**1_2 ):
__a : List[str] = 1
__a : Any = 0
__a : Any = 1
__a : List[Any] = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file as-is — the
    # function above was renamed `__magic_name__` by obfuscation; restore
    # the name before running this entry point.
    print(f'{solution() = }')
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import plumbing for the Ernie model subpackage: heavy torch-backed
# symbols are only imported when actually accessed (or under TYPE_CHECKING).
# NOTE(review): the assignment targets were mangled to `lowercase__`; the
# final `_LazyModule(...)` call reads `_import_structure`, which is never
# bound under that name as-is — restore `_import_structure` as the target of
# the dict/list assignments below.
lowercase__ = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

# Modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase__ = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

# Static type checkers see the real imports; at runtime the module is
# replaced with a _LazyModule that resolves attributes on demand.
if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __magic_name__ ( frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order low-pass biquad IIR filter.

    Coefficients follow the standard biquad low-pass form (RBJ Audio EQ
    Cookbook): *frequency* is the cutoff in Hz, *samplerate* the sample rate
    in Hz, *q_factor* the resonance (default 1/sqrt(2) = Butterworth).
    Fixes the original signature, whose parameters were all named
    `_lowerCamelCase` (a SyntaxError) while the body already referenced
    `frequency`, `samplerate` and `q_factor`.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def __magic_name__ ( frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order high-pass biquad IIR filter (RBJ cookbook form).

    Fixes the original duplicate `_lowerCamelCase` parameter names
    (a SyntaxError); the body already referenced the names restored here.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt
def __magic_name__ ( frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order band-pass biquad IIR filter (RBJ cookbook form).

    Fixes the original duplicate `_lowerCamelCase` parameter names
    (a SyntaxError); the body already referenced the names restored here.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def __magic_name__ ( frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """Create a 2nd-order all-pass biquad IIR filter (RBJ cookbook form).

    The feed-forward coefficients are the feed-back ones in reverse order,
    giving unit magnitude response with frequency-dependent phase shift.
    Fixes the original duplicate `_lowerCamelCase` parameter names
    (a SyntaxError); the body already referenced the names restored here.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt
def __magic_name__ ( frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2 ), ) -> IIRFilter:
    """Create a 2nd-order peaking-EQ biquad IIR filter (RBJ cookbook form).

    *gain_db* boosts (positive) or cuts (negative) around *frequency*.
    Fixes the original duplicate `_lowerCamelCase` parameter names
    (a SyntaxError); the body already referenced the names restored here.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def __magic_name__ ( frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2 ), ) -> IIRFilter:
    """Create a 2nd-order low-shelf biquad IIR filter (RBJ cookbook form).

    Frequencies below *frequency* are boosted/cut by *gain_db*. Fixes the
    original duplicate `_lowerCamelCase` parameter names (a SyntaxError);
    the body already referenced the names restored here.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)
    # Shared sub-expressions of the shelf formulas:
    # p/m = plus/minus of (A +/- 1), c = multiplied by cos(w0).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
def __magic_name__ ( frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2 ), ) -> IIRFilter:
    """Create a 2nd-order high-shelf biquad IIR filter (RBJ cookbook form).

    Frequencies above *frequency* are boosted/cut by *gain_db*. Fixes the
    original duplicate `_lowerCamelCase` parameter names (a SyntaxError);
    the body already referenced the names restored here.
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 1_0 ** (gain_db / 4_0)
    # Shared sub-expressions of the shelf formulas (see low-shelf variant).
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
| 63 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Closed set of scheduler identifiers accepted by the factory below;
    # each member's value is the string name users pass in (e.g. "linear").
    # NOTE(review): the base class was obfuscated to `__snake_case` —
    # presumably an (explicit string) Enum; confirm against the original.
    # NOTE(review): the class attributes were mangled to `_lowerCAmelCase`,
    # so each assignment shadows the previous one; the member names
    # (LINEAR, COSINE, ...) referenced by `get_scheduler` must be restored.
    _lowerCAmelCase = "linear"
    _lowerCAmelCase = "cosine"
    _lowerCAmelCase = "cosine_with_restarts"
    _lowerCAmelCase = "polynomial"
    _lowerCAmelCase = "constant"
    _lowerCAmelCase = "constant_with_warmup"
    _lowerCAmelCase = "piecewise_constant"
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ):
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1 ):
__a : Optional[int] = {}
__a : Any = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__a , __a : int = rule_str.split(""":""" )
__a : Optional[int] = int(_lowerCamelCase )
__a : str = float(_lowerCamelCase )
__a : int = value
__a : Dict = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase : str , _lowerCamelCase : Tuple ):
def rule_func(_lowerCamelCase : int ) -> float:
__a : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__a : Optional[int] = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str=-1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Any ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Optional[int] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=1E-7 , _lowerCamelCase : Optional[int]=1.0 , _lowerCamelCase : Optional[int]=-1 ):
__a : Union[str, Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__a : Tuple = lr_init - lr_end
__a : int = num_training_steps - num_warmup_steps
__a : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
__a : List[str] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Dispatch table mapping each SchedulerType member to its factory function;
# read by `get_scheduler` below.
# NOTE(review): as obfuscated, none of these names resolve — the factories
# above were all renamed `__magic_name__`, the enum members to
# `_lowerCAmelCase`, and the dict itself is bound to `lowercase__` while the
# factory reads `TYPE_TO_SCHEDULER_FUNCTION`. The original identifiers must
# be restored for this mapping to work.
lowercase__ = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ (
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified factory: build the scheduler named by *name* for *optimizer*.

    Dispatches through TYPE_TO_SCHEDULER_FUNCTION and forwards only the
    arguments each scheduler type needs, validating that warmup/training
    step counts are provided where required. Fixes the original signature,
    whose eight parameters were all named `_lowerCamelCase` (a SyntaxError).

    Raises:
        ValueError: if a required step-count argument is missing.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch )
| 63 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make all ops deterministic so the pipeline test outputs are reproducible
# across runs (diffusers test utility).
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
_lowerCAmelCase = KandinskyVaaInpaintPipeline
_lowerCAmelCase = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
_lowerCAmelCase = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
_lowerCAmelCase = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_lowerCAmelCase = False
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 32
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return 100
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : List[str] = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__a : List[str] = UNetaDConditionModel(**_lowercase )
return model
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__(self ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Any = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[str] = self.dummy_unet
__a : str = self.dummy_movq
__a : Optional[int] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_lowercase , set_alpha_to_one=_lowercase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=_lowercase , )
__a : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase ) ).to(_lowercase )
__a : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowercase )
# create init_image
__a : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
__a : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__a : Union[str, Any] = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
__a : Tuple = np.ones((64, 64) , dtype=np.floataa )
__a : int = 0
if str(_lowercase ).startswith("""mps""" ):
__a : List[str] = torch.manual_seed(_lowercase )
else:
__a : Tuple = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
__a : Optional[Any] = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = """cpu"""
__a : Union[str, Any] = self.get_dummy_components()
__a : Tuple = self.pipeline_class(**_lowercase )
__a : str = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
__a : Any = pipe(**self.get_dummy_inputs(_lowercase ) )
__a : str = output.images
__a : Optional[Any] = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
__a : Optional[int] = image[0, -3:, -3:, -1]
__a : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
__a : Tuple = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
    def lowerCAmelCase__(self ):
        """Delegate to the shared mixin's batched-vs-single inference check,
        allowing a relaxed max element-wise difference of 3e-3."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 inpaint pipeline,
    comparing a full prior+decoder run against a stored reference image."""

    def lowerCAmelCase__(self ):
        """Release Python and CUDA memory after each test case."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase__(self ):
        """End-to-end inpainting: prior produces image embeddings, the decoder
        inpaints a masked cat photo with "a hat", and the result is compared
        to a reference .npy image by mean pixel difference.

        NOTE(review): names such as ``_lowercase``, ``pipe_prior``,
        ``pipeline``, ``output`` and ``image`` are read but never assigned
        (assignments all target ``__a``), so the method cannot execute as
        written; variable names appear mangled — confirm against upstream.
        """
        __a : Optional[Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        __a : Optional[int] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        # Binary mask over the 768x768 image (exact masked region lost to the
        # name mangling noted above).
        __a : Tuple = np.ones((768, 768) , dtype=np.floataa )
        __a : str = 0
        __a : List[Any] = """a hat"""
        __a : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(_lowercase )
        __a : int = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
        __a : List[Any] = pipeline.to(_lowercase )
        pipeline.set_progress_bar_config(disable=_lowercase )
        # CPU generator keeps the run reproducible across GPU types.
        __a : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __a , __a : Union[str, Any] = pipe_prior(
            _lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        __a : Dict = pipeline(
            image=_lowercase , mask_image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        __a : Tuple = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(_lowercase , _lowercase )
| 63 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __magic_name__ ( _lowerCamelCase : str , display : bool = False ):
    """Load an OmegaConf YAML configuration file.

    Args:
        _lowerCamelCase: Path to the YAML config file.
        display: When True, pretty-print the parsed config as YAML.

    Returns:
        The parsed OmegaConf configuration object.
    """
    # NOTE(review): the original signature declared the same parameter name
    # twice (a SyntaxError) and referenced unbound names ``display``/``config``;
    # fixed here without changing the positional call convention.
    config = OmegaConf.load(_lowerCamelCase )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def __magic_name__ ( _lowerCamelCase : str , conf_path : str = None , ckpt_path : str = None ):
    """Build a taming-transformers ``VQModel`` and load checkpoint weights.

    Args:
        _lowerCamelCase: Device (e.g. ``"cpu"``/``"cuda"``) the model and the
            checkpoint are mapped to.
        conf_path: Path to the model YAML config; defaults to the bundled
            ``vqgan_only.yaml``.
        ckpt_path: Path to the checkpoint file; defaults to the bundled
            ``vqgan_only.pt``.

    Returns:
        The ``VQModel`` with weights loaded, moved to the requested device.
    """
    # NOTE(review): the original def repeated one parameter name three times
    # (a SyntaxError) and used unbound locals (``conf_path``, ``ckpt_path``,
    # ``sd``, ``model``); restored here.
    if conf_path is None:
        conf_path = """./model_checkpoints/vqgan_only.yaml"""
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = """./model_checkpoints/vqgan_only.pt"""
    sd = torch.load(ckpt_path , map_location=_lowerCamelCase )
    # PyTorch Lightning checkpoints nest the weights under "state_dict".
    if ".ckpt" in ckpt_path:
        sd = sd["""state_dict"""]
    model.load_state_dict(sd , strict=True )
    model.to(_lowerCamelCase )
    # Drop the raw state dict promptly to reduce peak memory.
    del sd
    return model
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] ):
__a , __a , __a : Tuple = model.encode(_lowerCamelCase )
print(F'''VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}''' )
__a : Union[str, Any] = model.decode(_lowerCamelCase )
return xrec
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=False ):
__a , __a : Optional[Any] = string.rsplit(""".""" , 1 )
if reload:
__a : Optional[Any] = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def __magic_name__ ( _lowerCamelCase : Any ):
    """Instantiate the object described by a config mapping.

    The mapping must carry a ``"target"`` dotted import path; the optional
    ``"params"`` mapping is expanded as keyword arguments to the constructor.

    Raises:
        KeyError: If the config has no ``"target"`` entry.
    """
    # NOTE(review): the original body referenced an unbound name ``config``
    # instead of the parameter; fixed to use the parameter.
    if "target" not in _lowerCamelCase:
        raise KeyError("""Expected key `target` to instantiate.""" )
    return get_obj_from_str(_lowerCamelCase["""target"""] )(**_lowerCamelCase.get("""params""" , {} ) )
def __magic_name__ ( _lowerCamelCase : Any , sd : Any , gpu : bool = True , eval_mode : bool = True ):
    """Instantiate a model from a config and optionally load weights.

    Args:
        _lowerCamelCase: Config mapping accepted by ``instantiate_from_config``.
        sd: State dict to load, or ``None`` to keep fresh initialization.
        gpu: Move the model to CUDA when True.
        eval_mode: Put the model in eval mode when True.

    Returns:
        ``{"model": model}`` wrapping the prepared model.
    """
    # NOTE(review): the original def repeated one parameter name four times
    # (a SyntaxError) and referenced unbound ``sd``/``model``/``gpu``/
    # ``eval_mode``; restored here.
    model = instantiate_from_config(_lowerCamelCase )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def __magic_name__ ( _lowerCamelCase : Any , ckpt : str , gpu : bool , eval_mode : bool ):
    """Load an (optional) Lightning checkpoint and build the model it holds.

    Args:
        _lowerCamelCase: Config object whose ``.model`` sub-config describes
            the model to instantiate.
        ckpt: Checkpoint path; falsy means "no checkpoint" (fresh weights).
        gpu: Forwarded to ``load_model_from_config``.
        eval_mode: Forwarded to ``load_model_from_config``.

    Returns:
        ``(model, global_step)`` where ``global_step`` is ``None`` when no
        checkpoint was given.
    """
    # NOTE(review): the original def repeated one parameter name (SyntaxError)
    # and referenced unbound ``ckpt``/``pl_sd``/``global_step``/``model``;
    # restored here.
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="""cpu""" )
        global_step = pl_sd["""global_step"""]
        print(F'''loaded model from global step {global_step}.''' )
    else:
        pl_sd = {"""state_dict""": None}
        global_step = None
    model = load_model_from_config(_lowerCamelCase.model , pl_sd["""state_dict"""] , gpu=gpu , eval_mode=eval_mode )["""model"""]
    return model, global_step
| 63 | 1 |
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Unit tests for ``MgpstrTokenizer`` using a tiny character vocabulary.

    NOTE(review): every method shares the name ``lowerCAmelCase__`` (only the
    last definition survives on the class) and several bodies read names
    (``_lowercase``, ``tokenizers``, ``special_token``, ``decoded``, ...)
    whose assignments target the throwaway name ``__a`` — the identifiers
    appear mangled; confirm against the upstream test file.
    """
    # Class attributes expected by TokenizerTesterMixin (all assigned to the
    # same mangled name; originally tokenizer_class / test_rust_tokenizer /
    # from_pretrained_kwargs / test_seq2seq or similar flags).
    _lowerCAmelCase = MgpstrTokenizer
    _lowerCAmelCase = False
    _lowerCAmelCase = {}
    _lowerCAmelCase = False
    def lowerCAmelCase__(self ):
        """Write a tiny [GO]/[s]/digits/lowercase vocab file into tmpdirname."""
        super().setUp()
        # fmt: off
        __a : int = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
        # fmt: on
        __a : List[Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowercase ) + """\n""" )
    def lowerCAmelCase__(self , **_lowercase ):
        """Reload a tokenizer from the temp dir created in setUp."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
    def lowerCAmelCase__(self , _lowercase ):
        """Return a (input_text, output_text) pair for round-trip checks."""
        __a : str = """tester"""
        __a : int = """tester"""
        return input_text, output_text
    @unittest.skip("""MGP-STR always lower cases letters.""" )
    def lowerCAmelCase__(self ):
        """Skipped: casing behavior is fixed by the model."""
        pass
    def lowerCAmelCase__(self ):
        """Added special tokens must encode to one id and never be decoded back."""
        __a : Optional[int] = self.get_tokenizers(do_lower_case=_lowercase )
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                __a : Any = """[SPECIAL_TOKEN]"""
                tokenizer.add_special_tokens({"""cls_token""": special_token} )
                __a : str = tokenizer.encode([special_token] , add_special_tokens=_lowercase )
                self.assertEqual(len(_lowercase ) , 1 )
                __a : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
                self.assertTrue(special_token not in decoded )
    def lowerCAmelCase__(self ):
        """tokenize->convert_tokens_to_ids must agree with encode; decode must round-trip."""
        __a : Union[str, Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
                __a , __a : str = self.get_input_output_texts(_lowercase )
                __a : str = tokenizer.tokenize(_lowercase )
                __a : Dict = tokenizer.convert_tokens_to_ids(_lowercase )
                __a : List[Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
                self.assertListEqual(_lowercase , _lowercase )
                __a : Optional[int] = tokenizer.convert_ids_to_tokens(_lowercase )
                self.assertNotEqual(len(_lowercase ) , 0 )
                __a : str = tokenizer.decode(_lowercase )
                self.assertIsInstance(_lowercase , _lowercase )
                self.assertEqual(text_a.replace(""" """ , """""" ) , _lowercase )
    @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
    def lowerCAmelCase__(self ):
        """Skipped: pair-sequence APIs are unsupported."""
        pass
    @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
    def lowerCAmelCase__(self ):
        """Skipped: pretokenized inputs are unsupported."""
        pass
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 63 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration class for UniSpeech models.

    Stores the hyper-parameters that define the convolutional feature
    extractor, the transformer encoder, SpecAugment masking, the codevector
    quantizer used during pretraining, and the CTC head.

    NOTE(review): the original ``__init__`` declared every parameter under
    one repeated name (a SyntaxError) and its body referenced unbound names;
    the signature below restores distinct parameter names while keeping the
    same defaults and positional order.
    """

    _lowerCAmelCase = "unispeech"

    def __init__(self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , feat_quantizer_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , replace_prob=0.5 , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # One conv layer per entry of conv_dim; validated below.
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def lowerCAmelCase__(self ):
        """Overall downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 63 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for ``BlipProcessor`` (tokenizer + image processor pairing).

    NOTE(review): all methods share the name ``lowerCAmelCase__`` (only the
    last definition survives on the class), and several bodies read names
    (``processor``, ``image_inputs``, ``tokenizer``, ``inputs``, ...) whose
    assignments target the throwaway name ``__a`` — identifiers appear
    mangled; confirm against the upstream test file.
    """
    def lowerCAmelCase__(self ):
        """Create a temp dir and save a tiny BlipProcessor into it."""
        __a : str = tempfile.mkdtemp()
        __a : Dict = BlipImageProcessor()
        __a : int = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-BertModel""" )
        __a : Optional[Any] = BlipProcessor(_lowercase , _lowercase )
        processor.save_pretrained(self.tmpdirname )
    def lowerCAmelCase__(self , **_lowercase ):
        """Reload the tokenizer half of the saved processor."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).tokenizer
    def lowerCAmelCase__(self , **_lowercase ):
        """Reload the image-processor half of the saved processor."""
        return AutoProcessor.from_pretrained(self.tmpdirname , **_lowercase ).image_processor
    def lowerCAmelCase__(self ):
        """Remove the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname )
    def lowerCAmelCase__(self ):
        """Build one random 3x30x400 uint8 image as a PIL input list."""
        __a : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __a : int = [Image.fromarray(np.moveaxis(_lowercase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def lowerCAmelCase__(self ):
        """Save then reload with extra kwargs; components must round-trip."""
        __a : Any = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __a : int = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        __a : int = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0 )
        __a : Optional[Any] = BlipProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_lowercase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , _lowercase )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , _lowercase )
    def lowerCAmelCase__(self ):
        """Processor image output must match the bare image processor's."""
        __a : Optional[Any] = self.get_image_processor()
        __a : List[str] = self.get_tokenizer()
        __a : List[str] = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
        __a : Union[str, Any] = self.prepare_image_inputs()
        __a : str = image_processor(_lowercase , return_tensors="""np""" )
        __a : Optional[int] = processor(images=_lowercase , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def lowerCAmelCase__(self ):
        """Processor text output must match the bare tokenizer's."""
        __a : Union[str, Any] = self.get_image_processor()
        __a : Dict = self.get_tokenizer()
        __a : int = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
        __a : Tuple = """lower newer"""
        __a : List[str] = processor(text=_lowercase )
        __a : Optional[int] = tokenizer(_lowercase , return_token_type_ids=_lowercase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def lowerCAmelCase__(self ):
        """Text+image call yields the expected keys; no input must raise."""
        __a : Dict = self.get_image_processor()
        __a : Dict = self.get_tokenizer()
        __a : Optional[Any] = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
        __a : List[str] = """lower newer"""
        __a : Dict = self.prepare_image_inputs()
        __a : List[str] = processor(text=_lowercase , images=_lowercase )
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
        # test if it raises when no input is passed
        with pytest.raises(_lowercase ):
            processor()
    def lowerCAmelCase__(self ):
        """batch_decode must delegate to the tokenizer's batch_decode."""
        __a : Tuple = self.get_image_processor()
        __a : int = self.get_tokenizer()
        __a : Union[str, Any] = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
        __a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        __a : Any = processor.batch_decode(_lowercase )
        __a : Dict = tokenizer.batch_decode(_lowercase )
        self.assertListEqual(_lowercase , _lowercase )
    def lowerCAmelCase__(self ):
        """Model-input names are exactly the three supported keys."""
        __a : str = self.get_image_processor()
        __a : Any = self.get_tokenizer()
        __a : Tuple = BlipProcessor(tokenizer=_lowercase , image_processor=_lowercase )
        __a : str = """lower newer"""
        __a : Optional[Any] = self.prepare_image_inputs()
        __a : Optional[Any] = processor(text=_lowercase , images=_lowercase )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 63 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Test helper that builds tiny EfficientFormer configs/inputs and runs
    shape checks for the TF model classes.

    NOTE(review): ``__init__`` repeats the parameter name ``_lowercase`` (a
    SyntaxError as written) and every attribute assignment targets the
    throwaway name ``__a`` instead of ``self.<attr>``, while later methods
    read ``self.parent``, ``self.batch_size`` etc. — identifiers appear
    mangled; confirm against the upstream tester class.
    """
    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        """Record the tiny-model hyper-parameters used by every check below."""
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio
    def lowerCAmelCase__(self ):
        """Build (config, pixel_values, labels) for one tiny forward pass."""
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __a : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase__(self ):
        """Build an EfficientFormerConfig from the stored hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Base model forward pass must return the expected hidden-state shape."""
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Classification head must produce (batch, num_labels) logits,
        including the greyscale (single-channel) case."""
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )
        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowerCAmelCase__(self ):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = TFEfficientFormerModelTester(self )
__a : Any = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
__a : Tuple = model_class(_lowercase )
__a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__a : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__a : int = seq_length * self.model_tester.chunk_length
else:
__a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__a : Optional[int] = outputs.decoder_hidden_states
self.asseretIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
__a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
__a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__a : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__a : List[Any] = True
__a : Tuple = False
__a : List[Any] = True
__a : int = model_class(_lowercase )
__a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[Any] = True
__a : List[str] = model_class(_lowercase )
__a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
    def lowerCAmelCase__(self ):
        """Build each model from maximally-general (None-shaped) Keras symbolic inputs.

        NOTE(review): `model_class(_lowercase )`, `model.input_signature` and
        `model(_lowercase )` read names that are never bound here (locals are
        assigned to `__a`); as written this raises NameError -- the variable
        names look mangled.
        """
        __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            __a : Dict = model_class(_lowercase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            __a : Optional[Any] = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            __a : Optional[Any] = model(_lowercase )
            self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
    """Load the standard COCO fixture image used by the vision integration tests."""
    # Bug fix: the opened image was bound to `__a` but the function returned the
    # undefined name `image`, which raised NameError.
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests running real EfficientFormer checkpoints on a COCO image.

    NOTE(review): the identifiers here look mangled -- the three methods share one
    name (only the last survives on the class), the bodies read
    `self.default_image_processor`, `prepare_img`, `image_processor`, `model` and
    `_lowercase`, none of which are bound under those names in this file.  Restore
    the original names before running these tests.
    """
    @cached_property
    def lowerCAmelCase__(self ):
        """Image processor for the l1-300 checkpoint (None when vision extras are absent)."""
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )
    @slow
    def lowerCAmelCase__(self ):
        """Compare the image-classification head logits against recorded values."""
        __a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        __a : Optional[Any] = self.default_image_processor
        __a : List[str] = prepare_img()
        __a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : Optional[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : str = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
    @slow
    def lowerCAmelCase__(self ):
        """Same logits check for the distillation (teacher) classification head."""
        __a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        __a : Any = self.default_image_processor
        __a : str = prepare_img()
        __a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : List[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : int = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 63 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase__ = {
"configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"GraphormerForGraphClassification",
"GraphormerModel",
"GraphormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """A distribution transformed by the affine map ``y = loc + scale * x``.

    NOTE(review): the base-class name ``__snake_case`` is not defined in this file;
    given the call to ``super().__init__(base, [AffineTransform(...)])`` it is
    presumably ``TransformedDistribution`` (imported above) -- confirm.  The three
    properties below also share one mangled name, so only the last survives; they
    need distinct names (mean / variance / stddev) to be usable.
    """

    def __init__(self , base_distribution , loc=None , scale=None , event_dim=0 ):
        # Bug fix: the original signature repeated the parameter name `_lowercase`
        # four times (a SyntaxError), and stored loc/scale in throwaway locals even
        # though the properties below read `self.loc` / `self.scale`.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def lowerCAmelCase__(self ):
        """Mean of the transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc
    @property
    def lowerCAmelCase__(self ):
        """Variance of the transformed distribution (scale enters squared)."""
        return self.base_dist.variance * self.scale**2
    @property
    def lowerCAmelCase__(self ):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Projects a feature vector to one unconstrained tensor per distribution argument.

    Args:
        in_features: size of the incoming feature dimension.
        args_dim: mapping of argument name -> dimensionality of that argument.
        domain_map: callable mapping the raw projections into the argument domains.
    """

    def __init__(self , in_features , args_dim , domain_map , **kwargs ):
        # Bug fixes: the original signature reused the name `_lowercase` for every
        # parameter (a SyntaxError), the projections / domain map were bound to
        # throwaway locals even though the forward method reads `self.proj` and
        # `self.domain_map`, and the body referenced the then-undefined `args_dim`.
        super().__init__(**kwargs )
        self.args_dim = args_dim
        # One linear head per distribution argument, sized by args_dim.
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map

    def lowerCAmelCase__(self , x ):
        """Apply every linear head to `x` and map the results into their domains."""
        # Bug fix: the domain map was previously unpacked over the *input* tensor
        # instead of over the projected tensors.
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    """Wraps an arbitrary callable so it can be used as an `nn.Module`."""

    def __init__(self , function ):
        # Bug fix: the wrapped callable was stored in a throwaway local even though
        # the method below reads `self.function`.
        super().__init__()
        self.function = function

    def lowerCAmelCase__(self , x , *args ):
        """Invoke the wrapped callable on `x` (plus any extra positional args)."""
        # Bug fix: the original signature reused the name `_lowercase` for both the
        # positional and the *varargs parameter, which is a SyntaxError.
        return self.function(x , *args )
class SCREAMING_SNAKE_CASE__ :
    """Base class describing how raw network outputs parameterize a torch distribution.

    Concrete subclasses are expected to supply the per-argument sizes
    (`args_dim`), the `distribution_class`, and a `domain_map`.

    NOTE(review): the three class attributes below and all methods share single
    mangled names (`_lowerCAmelCase` / `lowerCAmelCase__`), so only the *last*
    assignment/definition of each survives at class-creation time; the original
    distinct names must be restored for this class to be usable.
    """
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42

    def __init__(self , _lowercase = 1 ):
        # Bug fix: `dim` and the scaled args_dim dict were previously bound to
        # throwaway locals (while other methods read `self.dim` / `self.args_dim`),
        # and the comprehension referenced the undefined name `dim` instead of the
        # parameter.
        self.dim = _lowercase
        self.args_dim = {k: _lowercase * self.args_dim[k] for k in self.args_dim}

    def lowerCAmelCase__(self , _lowercase ):
        """Instantiate the base distribution (wrapped in Independent when dim > 1)."""
        if self.dim == 1:
            return self.distribution_class(*_lowercase )
        else:
            return Independent(self.distribution_class(*_lowercase ) , 1 )

    def lowerCAmelCase__(self , distr_args , loc = None , scale = None , ):
        """Build the output distribution, optionally affine-transformed by loc/scale."""
        # Bug fix: the signature repeated the name `_lowercase` three times (a
        # SyntaxError); the body already read `loc`/`scale`, so those names are kept.
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            # NOTE(review): `AffineTransformed` is not defined under that name in
            # this file (the affine-transformed class above was renamed) -- confirm.
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )

    @property
    def lowerCAmelCase__(self ):
        """Event shape: scalar events for dim == 1, vector events otherwise."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def lowerCAmelCase__(self ):
        """Number of event dimensions (length of the event shape)."""
        return len(self.event_shape )

    @property
    def lowerCAmelCase__(self ):
        """A value guaranteed to lie in the support (used to pad masked positions)."""
        return 0.0

    def lowerCAmelCase__(self , _lowercase ):
        """Return the projection module mapping `in_features`-sized features to args."""
        return ParameterProjection(
            in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def lowerCAmelCase__(self , *_lowercase ):
        """Map raw projections into each argument's domain (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def lowerCAmelCase__(_lowercase ):
        """Numerically stable map onto the positive reals: (x + sqrt(x^2 + 4)) / 2."""
        # Bug fix: the body referenced the undefined name `x` for the first operand.
        return (_lowercase + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Student-T output head: constrains raw projections to valid (df, loc, scale).

    NOTE(review): the two class attributes below share one mangled name, so only
    the second assignment survives; they were presumably `args_dim` and
    `distribution_class` -- confirm against the base class.
    """
    _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
    _lowerCAmelCase = StudentT

    @classmethod
    def lowerCAmelCase__(cls , df , loc , scale ):
        """Map raw (df, loc, scale) projections into their valid domains."""
        # Bug fix: the original signature repeated the parameter name `_lowercase`
        # three times (a SyntaxError) while the body already read df/loc/scale.
        # scale must be strictly positive; clamp away exact zeros.
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        # squareplus is positive, so the shift keeps df strictly above 2.
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Normal output head: constrains raw projections to valid (loc, scale).

    NOTE(review): the two class attributes below share one mangled name, so only
    the second assignment survives; they were presumably `args_dim` and
    `distribution_class` -- confirm against the base class.
    """
    _lowerCAmelCase = {"loc": 1, "scale": 1}
    _lowerCAmelCase = Normal

    @classmethod
    def lowerCAmelCase__(cls , loc , scale ):
        """Map raw (loc, scale) projections into their valid domains."""
        # Bug fix: the original signature reused the parameter name `_lowercase`
        # twice (a SyntaxError) while the body already read loc/scale.
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Negative-binomial output head parameterized by (total_count, logits).

    NOTE(review): the two class attributes and the two instance methods below each
    share one mangled name, so only the last of each survives at class-creation
    time; the original distinct names must be restored for this class to work.
    """
    _lowerCAmelCase = {"total_count": 1, "logits": 1}
    _lowerCAmelCase = NegativeBinomial

    @classmethod
    def lowerCAmelCase__(cls , total_count , logits ):
        """Constrain total_count to the positive reals; logits stay unconstrained."""
        # Bug fix: the original signature reused the parameter name `_lowercase`
        # twice (a SyntaxError) while the body read total_count/logits.
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )

    def lowerCAmelCase__(self , distr_args ):
        """Instantiate the base distribution (Independent-wrapped when dim > 1)."""
        # Bug fix: the argument tuple was previously unpacked from the undefined
        # name `distr_args` while the parameter itself was mangled away.
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )

    def lowerCAmelCase__(self , distr_args , loc = None , scale = None ):
        """Build the distribution, folding an optional scale into the logits."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            # NOTE: `+=` is in-place on tensors, so the unpacked logits tensor is
            # mutated here (preserved from the original implementation).
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 63 | 1 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
# Verbose root logging for the eval-script tests.
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): `logger.addHandler(...)` further down references the name
# `logger`, but the root logger is bound here to `lowercase__` -- the original
# binding was presumably `logger`.
lowercase__ = logging.getLogger()
def __magic_name__ ( _lowerCamelCase : Path , _lowerCamelCase : list ):
__a : List[Any] = """\n""".join(_lowerCamelCase )
Path(_lowerCamelCase ).open("""w""" ).writelines(_lowerCamelCase )
lowercase__ = "patrickvonplaten/t5-tiny-random"
lowercase__ = "sshleifer/bart-tiny-random"
lowercase__ = "sshleifer/tiny-mbart"
lowercase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """End-to-end tests for the run_eval / run_eval_search CLI scripts.

    NOTE(review): many identifiers look mangled -- the four test methods share one
    name (later definitions shadow earlier ones), `self.run_eval_tester(_lowercase )`
    and `patch.object(_lowercase , """argv""" , _lowercase )` use unbound names, the
    CLI argument lists are bound to `__a` but never used, and constants such as
    T5_TINY are referenced but not defined above.  Restore the original names
    before relying on these tests.
    """
    def lowerCAmelCase__(self , _lowercase ):
        """Run run_eval on a one-line source file and assert the score file is written."""
        __a : Optional[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        __a : Dict = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        __a : List[Any] = [""" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."""]
        _dump_articles(_lowercase , _lowercase )
        __a : Dict = str(Path(self.get_auto_remove_tmp_dir() ) / """scores.json""" )
        # T5 checkpoints are exercised as translation, everything else as summarization.
        __a : Dict = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        __a : List[str] = F'''
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        '''.split()
        with patch.object(_lowercase , """argv""" , _lowercase ):
            run_generate()
            assert Path(_lowercase ).exists()
            # os.remove(Path(output_file_name))
    def lowerCAmelCase__(self ):
        """Smoke-run the eval tester (the checkpoint argument appears mangled away)."""
        self.run_eval_tester(_lowercase )
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def lowerCAmelCase__(self , _lowercase ):
        """Parameterized eval-tester run over the tiny BART / mBART checkpoints."""
        self.run_eval_tester(_lowercase )
    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def lowerCAmelCase__(self , _lowercase ):
        """Run run_eval_search over a tiny EN->DE corpus and check the report output."""
        __a : List[Any] = Path(self.get_auto_remove_tmp_dir() ) / """utest_input.source"""
        __a : Optional[Any] = input_file_name.parent / """utest_output.txt"""
        assert not output_file_name.exists()
        __a : Dict = {
            """en""": ["""Machine learning is great, isn't it?""", """I like to eat bananas""", """Tomorrow is another great day!"""],
            """de""": [
                """Maschinelles Lernen ist großartig, oder?""",
                """Ich esse gerne Bananen""",
                """Morgen ist wieder ein toller Tag!""",
            ],
        }
        __a : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() )
        __a : Dict = str(tmp_dir / """scores.json""" )
        __a : Optional[int] = str(tmp_dir / """val.target""" )
        _dump_articles(_lowercase , text["""en"""] )
        _dump_articles(_lowercase , text["""de"""] )
        __a : int = """translation_en_to_de""" if model == T5_TINY else """summarization"""
        __a : Any = F'''
            run_eval_search.py
            {model}
            {str(_lowercase )}
            {str(_lowercase )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        '''.split()
        testargs.extend(["""--search""", """num_beams=1:2 length_penalty=0.9:1.0"""] )
        with patch.object(_lowercase , """argv""" , _lowercase ):
            with CaptureStdout() as cs:
                run_search()
            # The search report must mention the grid columns, the model and the
            # winning arguments, and must not leak log "Info" lines.
            __a : List[str] = [""" num_beams | length_penalty""", model, """Best score args"""]
            __a : Optional[int] = ["""Info"""]
            if "translation" in task:
                expected_strings.append("""bleu""" )
            else:
                expected_strings.extend(_lowercase )
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(_lowercase ).exists()
            os.remove(Path(_lowercase ) )
| 63 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Fast pipeline tests for `KandinskyVaaPriorPipeline` built from tiny components.

    NOTE(review): every property/method below shares the single mangled name
    `lowerCAmelCase__` (only the last survives on the class), several bodies
    return names that are never bound (e.g. `return tokenizer`), one signature
    repeats the parameter `_lowercase` (a SyntaxError as written), and many
    calls pass the unbound `_lowercase`.  The original distinct names
    (dummy_prior, get_dummy_components, ...) must be restored.
    """
    _lowerCAmelCase = KandinskyVaaPriorPipeline
    _lowerCAmelCase = ["prompt"]
    _lowerCAmelCase = ["prompt", "negative_prompt"]
    _lowerCAmelCase = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    _lowerCAmelCase = False
    @property
    def lowerCAmelCase__(self ):
        """Dimensional constant for the tiny test components."""
        return 32
    @property
    def lowerCAmelCase__(self ):
        """Dimensional constant for the tiny test components."""
        return 32
    @property
    def lowerCAmelCase__(self ):
        """Alias of the time-input dimension."""
        return self.time_input_dim
    @property
    def lowerCAmelCase__(self ):
        """Expanded (x4) time-input dimension."""
        return self.time_input_dim * 4
    @property
    def lowerCAmelCase__(self ):
        """Length constant for the tiny test components."""
        return 100
    @property
    def lowerCAmelCase__(self ):
        """Tokenizer for the tiny CLIP text model."""
        __a : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer
    @property
    def lowerCAmelCase__(self ):
        """Tiny CLIP text encoder with projection (seeded for determinism)."""
        torch.manual_seed(0 )
        __a : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_lowercase )
    @property
    def lowerCAmelCase__(self ):
        """Tiny PriorTransformer with clip_std forced to ones (seeded)."""
        torch.manual_seed(0 )
        __a : Dict = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        __a : Tuple = PriorTransformer(**_lowercase )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __a : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model
    @property
    def lowerCAmelCase__(self ):
        """Tiny CLIP vision encoder with projection (seeded)."""
        torch.manual_seed(0 )
        __a : List[str] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __a : Optional[Any] = CLIPVisionModelWithProjection(_lowercase )
        return model
    @property
    def lowerCAmelCase__(self ):
        """CLIP image processor for 224x224 center-cropped inputs."""
        __a : Optional[Any] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_lowercase , do_normalize=_lowercase , do_resize=_lowercase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
        return image_processor
    def lowerCAmelCase__(self ):
        """Assemble the component dict expected by KandinskyVaaPriorPipeline."""
        __a : Union[str, Any] = self.dummy_prior
        __a : int = self.dummy_image_encoder
        __a : Any = self.dummy_text_encoder
        __a : int = self.dummy_tokenizer
        __a : Optional[Any] = self.dummy_image_processor
        __a : List[Any] = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowercase , clip_sample_range=10.0 , )
        __a : List[Any] = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components
    def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
        """Deterministic generator + standard pipeline kwargs for a device/seed.

        NOTE(review): the two parameters share one name -- a SyntaxError as written.
        """
        if str(_lowercase ).startswith("""mps""" ):
            # MPS generators do not accept a device argument.
            __a : Dict = torch.manual_seed(_lowercase )
        else:
            __a : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __a : Union[str, Any] = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs
    def lowerCAmelCase__(self ):
        """Run the tiny pipeline on CPU and compare embeddings to recorded values."""
        __a : Union[str, Any] = """cpu"""
        __a : Union[str, Any] = self.get_dummy_components()
        __a : Dict = self.pipeline_class(**_lowercase )
        __a : Tuple = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __a : Optional[int] = pipe(**self.get_dummy_inputs(_lowercase ) )
        __a : str = output.image_embeds
        # Tuple-return path must match the dataclass-return path.
        __a : Any = pipe(
            **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
        __a : List[Any] = image[0, -10:]
        __a : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __a : Optional[Any] = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
    @skip_mps
    def lowerCAmelCase__(self ):
        """Batch-vs-single consistency check (skipped on MPS)."""
        __a : Any = torch_device == """cpu"""
        __a : Any = True
        __a : Any = False
        self._test_inference_batch_single_identical(
            test_max_difference=_lowercase , relax_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
    @skip_mps
    def lowerCAmelCase__(self ):
        """Attention-slicing equivalence check (skipped on MPS)."""
        __a : Optional[int] = torch_device == """cpu"""
        __a : Union[str, Any] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
| 63 | 1 |
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = get_tests_dir("fixtures/test_sentencepiece.model")
lowercase__ = {"target_lang": "fi", "source_lang": "en"}
lowercase__ = ">>zh<<"
lowercase__ = "Helsinki-NLP/"
if is_torch_available():
lowercase__ = "pt"
elif is_tf_available():
lowercase__ = "tf"
else:
lowercase__ = "jax"
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Tokenizer test suite for `MarianTokenizer` (SentencePiece-backed).

    NOTE(review): the test methods share the single mangled name
    `lowerCAmelCase__` (only the last definition survives), and bodies reference
    module constants (e.g. `ORG_NAME`, the framework constant) and locals that
    are never bound under those names -- restore the original identifiers.
    """
    _lowerCAmelCase = MarianTokenizer
    _lowerCAmelCase = False
    _lowerCAmelCase = True
    def lowerCAmelCase__(self ):
        """Create a minimal vocab + SentencePiece fixture tokenizer in a tmp dir."""
        super().setUp()
        __a : Any = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
        __a : Union[str, Any] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Any = Path(self.tmpdirname )
        save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
        save_json(_lowercase , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
        if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
            copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
            copyfile(_lowercase , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
        __a : Any = MarianTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def lowerCAmelCase__(self , **_lowercase ):
        """Factory: load a MarianTokenizer from the fixture directory."""
        return MarianTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
    def lowerCAmelCase__(self , _lowercase ):
        """Return an (input, expected output) text pair for round-trip checks."""
        return (
            "This is a test",
            "This is a test",
        )
    def lowerCAmelCase__(self ):
        """`</s>` should map to id 0 and back."""
        __a : Dict = """</s>"""
        __a : List[str] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
    def lowerCAmelCase__(self ):
        """Check ordering and size of the 9-token fixture vocabulary."""
        __a : int = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """</s>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """<pad>""" )
        self.assertEqual(len(_lowercase ) , 9 )
    def lowerCAmelCase__(self ):
        """vocab_size matches the 9-token fixture."""
        self.assertEqual(self.get_tokenizer().vocab_size , 9 )
    def lowerCAmelCase__(self ):
        """Tokenize with the real en-de checkpoint and check save/load round-trip."""
        __a : Union[str, Any] = MarianTokenizer.from_pretrained(F'''{ORG_NAME}opus-mt-en-de''' )
        __a : Optional[Any] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=_lowercase )
        self.assertIsInstance(_lowercase , _lowercase )
        __a : Optional[int] = [38, 121, 14, 697, 38848, 0]
        self.assertListEqual(_lowercase , batch.input_ids[0] )
        __a : str = tempfile.mkdtemp()
        en_de_tokenizer.save_pretrained(_lowercase )
        __a : Any = [x.name for x in Path(_lowercase ).glob("""*""" )]
        self.assertIn("""source.spm""" , _lowercase )
        MarianTokenizer.from_pretrained(_lowercase )
    def lowerCAmelCase__(self ):
        """Over-long inputs are truncated/padded to the 512-token model max length."""
        __a : List[Any] = self.get_tokenizer()
        __a : Optional[int] = tok(
            ["""I am a small frog""" * 1000, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors=_lowercase )
        self.assertIsInstance(_lowercase , _lowercase )
        self.assertEqual(batch.input_ids.shape , (2, 512) )
    def lowerCAmelCase__(self ):
        """Without truncation, padding stops at the longest sequence."""
        __a : List[str] = self.get_tokenizer()
        __a : Dict = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=_lowercase , return_tensors=_lowercase )
        self.assertIsInstance(_lowercase , _lowercase )
        self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
    @slow
    def lowerCAmelCase__(self ):
        """Pin input_ids/attention_mask of the opus-mt-en-de checkpoint to a revision."""
        # fmt: off
        __a : int = {"""input_ids""": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
    def lowerCAmelCase__(self ):
        """Checkpoint with separate source/target vocabs: encode, target-encode, decode."""
        __a : Dict = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
        __a : Optional[int] = """Tämä on testi"""
        __a : Tuple = """This is a test"""
        __a : List[Any] = [76, 7, 2047, 2]
        __a : List[Any] = [69, 12, 11, 940, 2]
        __a : Union[str, Any] = tokenizer(_lowercase ).input_ids
        self.assertListEqual(_lowercase , _lowercase )
        __a : Optional[int] = tokenizer(text_target=_lowercase ).input_ids
        self.assertListEqual(_lowercase , _lowercase )
        __a : Tuple = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase )
        self.assertEqual(_lowercase , _lowercase )
| 63 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Tokenizer test suite for LED (byte-level BPE, slow and fast variants).

    NOTE(review): all test methods share the mangled name `lowerCAmelCase__`
    (later definitions shadow earlier ones), and bodies reference
    `self.default_tokenizer` / `self.default_tokenizer_fast` while the cached
    properties providing them are also mangled -- restore the original names.
    """
    _lowerCAmelCase = LEDTokenizer
    _lowerCAmelCase = LEDTokenizerFast
    _lowerCAmelCase = True
    def lowerCAmelCase__(self ):
        """Write a tiny BPE vocab/merges fixture to the tmp dir."""
        super().setUp()
        __a : str = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        __a : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __a : List[Any] = {"""unk_token""": """<unk>"""}
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowercase ) )
    def lowerCAmelCase__(self , **_lowercase ):
        """Factory: slow tokenizer from the fixture dir (special tokens applied)."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
    def lowerCAmelCase__(self , **_lowercase ):
        """Factory: fast tokenizer from the fixture dir (special tokens applied)."""
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
    def lowerCAmelCase__(self , _lowercase ):
        """Return an (input, expected output) text pair for round-trip checks."""
        return "lower newer", "lower newer"
    @cached_property
    def lowerCAmelCase__(self ):
        """Slow tokenizer for the real led-base-16384 checkpoint."""
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
    @cached_property
    def lowerCAmelCase__(self ):
        """Fast tokenizer for the real led-base-16384 checkpoint."""
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
    @require_torch
    def lowerCAmelCase__(self ):
        """Batch encoding produces the expected ids and mask shapes."""
        __a : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __a : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            __a : Dict = batch.input_ids.tolist()[0]
            self.assertListEqual(_lowercase , _lowercase )
    @require_torch
    def lowerCAmelCase__(self ):
        """Plain encoding contains ids/mask but no label fields."""
        __a : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , _lowercase )
            self.assertIn("""attention_mask""" , _lowercase )
            self.assertNotIn("""labels""" , _lowercase )
            self.assertNotIn("""decoder_attention_mask""" , _lowercase )
    @require_torch
    def lowerCAmelCase__(self ):
        """Target texts respect max_length padding."""
        __a : Optional[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Dict = tokenizer(text_target=_lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )
    @require_torch
    def lowerCAmelCase__(self ):
        """Over-long inputs are truncated/padded to the model max length."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )
    @require_torch
    def lowerCAmelCase__(self ):
        """Both inputs and targets start with BOS and end with EOS."""
        __a : Tuple = ["""A long paragraph for summarization."""]
        __a : Dict = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : int = tokenizer(_lowercase , return_tensors="""pt""" )
            __a : Dict = tokenizer(text_target=_lowercase , return_tensors="""pt""" )
            __a : List[str] = inputs["""input_ids"""]
            __a : List[Any] = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
    @require_torch
    def lowerCAmelCase__(self ):
        """`pad` must carry the global_attention_mask through unchanged."""
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[Any] = ["""Summary of the text.""", """Another summary."""]
            __a : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __a : Union[str, Any] = tokenizer(_lowercase , padding=_lowercase )
            __a : Tuple = [[0] * len(_lowercase ) for x in encoded_output["""input_ids"""]]
            __a : Union[str, Any] = tokenizer.pad(_lowercase )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , _lowercase )
    def lowerCAmelCase__(self ):
        """Intentionally a no-op in this suite."""
        pass
    def lowerCAmelCase__(self ):
        """Slow and fast tokenizers agree on special tokens, masks and tokens."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = """A, <mask> AllenNLP sentence."""
                __a : Dict = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                __a : Tuple = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 63 | 1 |
"""simple docstring"""
import qiskit
def __magic_name__(qubits: int, classical_bits: int):
    """Measure qubit 0 into classical bit 0 on the aer simulator and return the counts.

    NOTE(review): the original signature declared the same placeholder parameter
    twice (a SyntaxError) and the body read locals (`circuit`, `job`) that were
    never bound; both are restored from how the values are used below.
    """
    simulator = qiskit.Aer.get_backend("""aer_simulator""")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])

    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1_0_0_0)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


# Backward-compatible alias: the __main__ guard below calls `single_qubit_measure`.
single_qubit_measure = __magic_name__
if __name__ == "__main__":
    # NOTE(review): `single_qubit_measure` is not defined anywhere in this module
    # as written — the measurement function above is named `__magic_name__`;
    # confirm the intended entry-point name before running this script.
    print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
| 63 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # NOTE(review): the script previously bound the parser, parsed args, and the
    # converted model to a single throwaway name while still referencing
    # `parser`, `args`, `controlnet` and `parse_bool` — every line after the
    # first raised NameError. Bindings are restored; behaviour is unchanged.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        """argparse `type=` hook: map the literal strings "True"/"False" to booleans."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'''could not parse string as bool {string}''')

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 1 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Builds a tiny hybrid-DPT config plus random pixel inputs for the model tests.

    NOTE(review): the constructor previously declared every parameter with one
    repeated placeholder name (a SyntaxError) and dropped each value into a
    throwaway local instead of binding it on ``self``, so every other method
    (which reads ``self.batch_size`` etc.) would raise. Names are restored from
    the attribute reads; defaults are unchanged.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) built from the tester settings."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a small hybrid DPTConfig backed by a 3-stage bottleneck backbone."""
        backbone_config = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [96, 192, 384, 768],
            """num_groups""": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict


# Backward-compatible alias: the test class below instantiates `DPTModelTester(self)`.
DPTModelTester = SCREAMING_SNAKE_CASE__
@require_torch
class SCREAMING_SNAKE_CASE__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """DPT model tests.

    NOTE(review): restored the mixin base classes (the bases were undefined
    placeholders), the conventional class attributes the mixins read
    (`all_model_classes`, `pipeline_model_mapping`, `test_*` flags), and the
    individual test method names — every method previously shared one name, so
    later definitions shadowed earlier ones and none were collected by unittest.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""DPT does not use inputs_embeds""")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            # DPTForDepthEstimation has no labels, so there is no loss to train on.
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [F'''{name}.{key}''' for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=F'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def test_model_is_small(self):
        # NOTE(review): method name inferred from the skip message — confirm
        # against the common-test mixin before relying on the override.
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # "add" is not a valid readout_type, so construction must raise.
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = """add"""
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def __magic_name__():
    """Load the COCO fixture image used by the slow integration test below.

    NOTE(review): the body previously bound the opened image to a throwaway
    local and then returned an undefined name; the binding is restored.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image


# Backward-compatible alias: the integration test below calls `prepare_img()`.
prepare_img = __magic_name__
@require_torch
@require_vision
@slow
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Slow end-to-end check against the public Intel/dpt-hybrid-midas checkpoint."""

    def test_inference_depth_estimation(self):
        # NOTE(review): restored distinct local names (the previous body bound
        # everything to one placeholder and read undefined names) and gave the
        # method a `test_` prefix so unittest collects it.
        image_processor = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""")
        model = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 63 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__(DiffusionPipeline):
    """Minimal test pipeline: runs one UNet + scheduler step and returns an
    all-ones tensor shaped like the scheduler output.

    NOTE(review): ``__init__`` previously declared two parameters with the same
    placeholder name (a SyntaxError) and ``__call__`` read locals that were
    never bound; both are restored without changing the pipeline contract.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample

        # `x - x + ones_like(x)` always evaluates to ones while keeping the
        # result tied to the scheduler output's device and dtype.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
| 63 | 1 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # NOTE(review): the script previously bound the parser, parsed args, and the
    # converted model to a single throwaway name while still referencing
    # `parser`, `args`, `controlnet` and `parse_bool` — every line after the
    # first raised NameError. Bindings are restored; behaviour is unchanged.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        """argparse `type=` hook: map the literal strings "True"/"False" to booleans."""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F'''could not parse string as bool {string}''')

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both statements below bind the SAME name `lowercase__`, so the
# config-archive map shadows the logger — presumably these were `logger` and
# `VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP`; confirm downstream readers before
# relying on either binding.
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for ViT-MSN models; stores the architecture hyper-parameters.

    NOTE(review): the constructor previously declared every parameter with the
    same placeholder name (a SyntaxError) and stored values in a throwaway local
    instead of on ``self``; both are fixed without changing any default.
    """

    # `model_type` is how `AutoConfig`/`from_pretrained` dispatch to this class.
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias


# Conventional public name for this configuration, kept as a backward-compatible alias.
ViTMSNConfig = SCREAMING_SNAKE_CASE__
| 63 | 1 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
lowercase__ = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowercase__ = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
lowercase__ = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace.

    Restored name: `compute_exact` below calls `normalize_answer`. The nested
    helpers previously read parameters/locals that were never bound.
    """

    def remove_articles(text):
        # Strip the English articles as whole words.
        regex = re.compile(r"""\b(a|an|the)\b""", re.UNICODE)
        return re.sub(regex, """ """, text)

    def white_space_fix(text):
        # Collapse any run of whitespace to a single space.
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def compute_exact(a_gold, a_pred):
    """Return 1 if the two answers are equal after normalization, else 0.

    Restored name: `compute_em` below calls `compute_exact`.
    """
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_em(predictions, references):
    """Percentage (0-100) of predictions that exactly match at least one reference.

    Name and keyword parameters are fixed by the `_compute` call below
    (`compute_em(predictions=..., references=...)`).
    """
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 1_0_0
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Keep/delete/add sub-scores of SARI for one n-gram order.

    Args:
        sgrams: n-grams of the source sentence.
        cgrams: n-grams of the candidate (system) sentence.
        rgramslist: list of n-gram lists, one per reference.
        numref: number of references.

    Returns:
        (keepscore, delscore_precision, addscore) tuple.

    NOTE(review): restored the distinct counter/score names — the previous body
    collapsed them onto one placeholder so later reads referenced the wrong
    (or an undefined) value.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter

    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """SARI for one (source, candidate, references) triple, averaged over 1-4 grams.

    NOTE(review): restored the per-order gram list names — the previous body
    collapsed s/c/r 1-4 gram lists onto single placeholders, so every n-gram
    order was appended into (and scored against) the wrong list.
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Lowercase and tokenize a sentence so SARI can split it on spaces.

    Restored name: `compute_sari` below calls `normalize(...)`.
    """
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100): normalize each triple, score it, and average.

    Name and keyword parameters are fixed by the `_compute` call below
    (`compute_sari(sources=..., predictions=..., references=...)`).
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("""Sources length must match predictions and references lengths.""")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 1_0_0 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu.

    Every prediction must have the same number of references; raises ValueError
    otherwise. Name and keyword parameters are fixed by the `_compute` call below.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
    # sacrebleu expects references transposed: one list per reference index.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SCREAMING_SNAKE_CASE__(datasets.Metric):
    """Combined WikiSplit metric: SARI + sacreBLEU + exact match.

    NOTE(review): restored the `_info`/`_compute` hook names that the
    `datasets.Metric` base class calls, and the `result` local that `_compute`
    reads (both were replaced with placeholders).
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Sequence(datasets.Value("""string""", id="""sequence"""), id="""references"""),
                }
            ),
            codebase_urls=[
                """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
                """https://github.com/cocoxu/simplification/blob/master/SARI.py""",
                """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
                """https://github.com/mjpost/sacreBLEU""",
            ],
            reference_urls=[
                """https://www.aclweb.org/anthology/Q16-1029.pdf""",
                """https://github.com/mjpost/sacreBLEU""",
                """https://en.wikipedia.org/wiki/BLEU""",
                """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"""sari""": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"""sacrebleu""": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"""exact""": compute_em(predictions=predictions, references=references)})
        return result
| 63 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
# NOTE(review): every binding below reuses the obfuscated name ``lowercase__``,
# so each assignment shadows the previous one (including the logger created
# first) — confirm the intended distinct constant names against the upstream
# DPR tokenization module.
lowercase__ = logging.get_logger(__name__)
# Expected on-disk file names for a (slow, fast) tokenizer pair.
lowercase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
# Remote vocab/tokenizer files for the DPR context-encoder checkpoints.
lowercase__ = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Remote vocab/tokenizer files for the DPR question-encoder checkpoints.
lowercase__ = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Remote vocab/tokenizer files for the DPR reader checkpoints.
lowercase__ = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum input lengths (positional-embedding sizes) per checkpoint.
lowercase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowercase__ = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
lowercase__ = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
# Default tokenizer init kwargs per checkpoint (all checkpoints lowercase input).
lowercase__ = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Fast DPR context-encoder tokenizer: a BertTokenizerFast wired to the
    # context-encoder resource tables above.
    # NOTE(review): obfuscation collapsed the five distinct class attributes
    # to the single name ``_lowerCAmelCase``, so only the last assignment
    # survives on the class; the right-hand-side names are also not defined
    # in this module (the tables are bound to ``lowercase__``) — confirm
    # against the upstream module.
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Fast DPR question-encoder tokenizer: a BertTokenizerFast wired to the
    # question-encoder resource tables above.
    # NOTE(review): same obfuscation artifact as the context-encoder class —
    # repeated ``_lowerCAmelCase`` attribute names (last one wins) and
    # right-hand-side names that are unbound in this module.
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = DPRQuestionEncoderTokenizer
# Record for a single predicted answer span from the DPR reader
# (originally ``DPRSpanPrediction``; here rebound to ``lowercase__``).
lowercase__ = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
# Raw reader model outputs consumed when decoding best spans
# (originally ``DPRReaderOutput``; this assignment shadows the one above).
lowercase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ :
    """Mixin adding DPR-reader-specific encoding and span decoding on top of a
    fast BERT tokenizer.

    Bug fixes vs. the obfuscated original: duplicated parameter names (a
    SyntaxError), a sort lambda that referenced the undefined name ``x``, a
    call to the undefined ``self._get_best_spans``, and two methods sharing
    one name (the second silently shadowed the first).  Parameter names are
    restored from the canonical DPR reader tokenizer.
    """

    def __call__(self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ):
        """Encode questions with passage titles/texts as
        ``[CLS] question [SEP] title [SEP] text`` sequences.

        A single question string is broadcast over all passages.  Returns a
        dict with ``input_ids`` and, unless ``return_attention_mask`` is
        ``False``, ``attention_mask``.
        """
        if titles is None and texts is None:
            # Plain question encoding: defer entirely to the base tokenizer.
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            # Only one of titles/texts supplied: treat it as the text pair.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str ) else [titles]
        texts = texts if not isinstance(texts , str ) else [texts]
        n_passages = len(titles )
        questions = questions if not isinstance(questions , str ) else [questions] * n_passages
        assert len(titles ) == len(
            texts ), F'''There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts.'''
        # Question+title gets special tokens; the passage text is appended raw.
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["""input_ids"""]
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
            encoded_inputs["""attention_mask"""] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )

    def lowerCAmelCase__(self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ):
        """Decode the best answer spans (upstream: ``decode_best_spans``).

        Passages are visited in decreasing relevance order; within each, up
        to ``num_spans_per_passage`` non-overlapping spans are kept, until
        ``num_spans`` predictions have been collected overall.
        """
        input_ids = reader_input["""input_ids"""]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits )
        sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id] )
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id )
            else:
                sequence_len = len(sequence_ids )
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                # Map span indices from passage-local back to full-sequence.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
            if len(nbest_spans_predictions ) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self , start_logits , end_logits , max_answer_length , top_spans , ):
        """Return up to ``top_spans`` non-overlapping (start, end) index pairs
        ranked by ``start_logit + end_logit``, each at most
        ``max_answer_length`` tokens long."""
        scores = []
        for start_index, start_score in enumerate(start_logits ):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
                scores.append(((start_index, start_index + answer_length), start_score + end_score) )
        scores = sorted(scores , key=lambda x : x[1] , reverse=True )
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F'''Wrong span indices: [{start_index}:{end_index}]'''
            length = end_index - start_index + 1
            assert length <= max_answer_length, F'''Span is too long: {length} > {max_answer_length}'''
            # Skip any span overlapping one already chosen.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index) )
            if len(chosen_span_intervals ) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case ):
    # Fast DPR reader tokenizer: combines the custom reader mixin above (for
    # the [CLS] question [SEP] title [SEP] text layout) with BertTokenizerFast.
    # NOTE(review): obfuscation collapsed the six distinct class attributes to
    # the single name ``_lowerCAmelCase`` (only the last assignment survives),
    # and the uppercase right-hand-side names are unbound in this module —
    # confirm against the upstream module.
    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = ["input_ids", "attention_mask"]
    _lowerCAmelCase = DPRReaderTokenizer
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int ):
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
__a : List[Any] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(_lowerCamelCase )
if number < 1:
__a : str = F'''Input value of [number={number}] must be > 0'''
raise ValueError(_lowerCamelCase )
__a : Dict = 1
for i in range(1 , _lowerCamelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
| 63 |
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[str] = len(grid[0] )
__a : int = len(_lowerCamelCase )
__a : Tuple = 0
__a : List[Any] = 0
__a : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
__a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__a : List[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__a : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__a : str = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
__a : Optional[Any] = max_product
return largest
def __magic_name__ ( ):
    """Load the grid shipped next to this module and return the largest
    product of four adjacent entries.

    NOTE(review): several names in this body are obfuscation casualties —
    ``_lowerCamelCase`` and ``grid`` are undefined here (results are bound to
    ``__a`` instead), and ``largest_product`` does not exist in this module
    (the search routine above is also named ``__magic_name__``), so calling
    this raises NameError.  Confirm the intended names against the upstream
    Project Euler solution.
    """
    __a : Tuple = []
    with open(os.path.dirname(_lowerCamelCase ) + """/grid.txt""" ) as file:
        for line in file:
            grid.append(line.strip("""\n""" ).split(""" """ ) )
    __a : Tuple = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )]
    return largest_product(_lowerCamelCase )
if __name__ == "__main__":
    # NOTE(review): ``solution`` is likewise undefined in this module.
    print(solution())
| 63 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # Slow end-to-end tests for the Flax Stable Diffusion ControlNet pipeline.
    # NOTE(review): all three methods below share the obfuscated name
    # ``lowerCAmelCase__``, so only the last definition is visible on the
    # class; the first was presumably ``tearDown`` and the others the two
    # test methods — confirm the intended names upstream.
    def lowerCAmelCase__(self ):
        '''Collect garbage after each test to release accelerator memory.'''
        super().tearDown()
        gc.collect()
    def lowerCAmelCase__(self ):
        '''Canny-edge conditioned generation reproduces the reference pixel slice.'''
        __a , __a : Optional[int] = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-canny""" , from_pt=_lowercase , dtype=jnp.bfloataa )
        __a , __a : Any = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=_lowercase , from_pt=_lowercase , dtype=jnp.bfloataa )
        __a : List[str] = controlnet_params
        __a : Any = """bird"""
        # One prompt/image per device; inputs are replicated and sharded below.
        __a : Tuple = jax.device_count()
        __a : int = pipe.prepare_text_inputs([prompts] * num_samples )
        __a : int = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png""" )
        __a : Tuple = pipe.prepare_image_inputs([canny_image] * num_samples )
        __a : Union[str, Any] = jax.random.PRNGKey(0 )
        __a : Optional[int] = jax.random.split(_lowercase , jax.device_count() )
        __a : str = replicate(_lowercase )
        __a : Optional[int] = shard(_lowercase )
        __a : Optional[int] = shard(_lowercase )
        __a : Tuple = pipe(
            prompt_ids=_lowercase , image=_lowercase , params=_lowercase , prng_seed=_lowercase , num_inference_steps=50 , jit=_lowercase , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        __a : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        # Compare a small fixed pixel window against the recorded reference.
        __a : Union[str, Any] = images[0, 253:256, 253:256, -1]
        __a : Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __a : Tuple = jnp.array(
            [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
    def lowerCAmelCase__(self ):
        '''Openpose conditioned generation reproduces the reference pixel slice.'''
        __a , __a : Any = FlaxControlNetModel.from_pretrained(
            """lllyasviel/sd-controlnet-openpose""" , from_pt=_lowercase , dtype=jnp.bfloataa )
        __a , __a : str = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            """runwayml/stable-diffusion-v1-5""" , controlnet=_lowercase , from_pt=_lowercase , dtype=jnp.bfloataa )
        __a : List[str] = controlnet_params
        __a : Union[str, Any] = """Chef in the kitchen"""
        __a : List[str] = jax.device_count()
        __a : Dict = pipe.prepare_text_inputs([prompts] * num_samples )
        __a : Optional[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png""" )
        __a : Optional[Any] = pipe.prepare_image_inputs([pose_image] * num_samples )
        __a : Dict = jax.random.PRNGKey(0 )
        __a : int = jax.random.split(_lowercase , jax.device_count() )
        __a : int = replicate(_lowercase )
        __a : Optional[int] = shard(_lowercase )
        __a : str = shard(_lowercase )
        __a : Optional[int] = pipe(
            prompt_ids=_lowercase , image=_lowercase , params=_lowercase , prng_seed=_lowercase , num_inference_steps=50 , jit=_lowercase , ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        __a : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        __a : List[str] = images[0, 253:256, 253:256, -1]
        __a : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        __a : Tuple = jnp.array(
            [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] )
        print(F'''output_slice: {output_slice}''' )
        assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Output container for the semantic Stable Diffusion pipeline.
    # NOTE(review): obfuscation gave both fields the same name, so the second
    # assignment rebinds the first; upstream these were presumably the
    # ``images`` and ``nsfw_content_detected`` fields — confirm before use.
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
# Path of the documentation table of contents this script validates/repairs.
lowercase__ = "docs/source/en/_toctree.yml"
def __magic_name__ ( _lowerCamelCase : str ):
__a : str = defaultdict(_lowerCamelCase )
__a : Any = []
__a : int = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"""local""": doc["""local"""], """title""": doc["""title"""]} )
else:
new_doc_list.append(_lowerCamelCase )
__a : Optional[int] = new_doc_list
__a : Union[str, Any] = [key for key, value in counts.items() if value > 1]
__a : Dict = []
for duplicate_key in duplicates:
__a : Union[str, Any] = list({doc["""title"""] for doc in doc_list if doc["""local"""] == duplicate_key} )
if len(_lowerCamelCase ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""" )
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if """local""" not in counts or counts[doc["""local"""]] == 1] )
__a : Optional[int] = sorted(_lowerCamelCase , key=lambda _lowerCamelCase : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(_lowerCamelCase ) > 1:
raise ValueError("""{doc_list} has two 'overview' docs which is not allowed.""" )
overview_doc.extend(_lowerCamelCase )
# Sort
return overview_doc
def __magic_name__ ( _lowerCamelCase : int=False ):
    """Check (and with ``_lowerCamelCase=True`` rewrite) the Schedulers section
    of the docs table of content.

    NOTE(review): several names here are obfuscation casualties —
    ``open(_lowerCamelCase, ...)`` opens the overwrite flag instead of the
    toctree path, ``clean_doc_toc`` is undefined (the cleaner above is also
    named ``__magic_name__``), and results are bound to ``__a`` while later
    lines read ``content``/``api_doc``/``scheduler_doc``/``diff``/``overwrite``.
    Confirm against the upstream check_doc_toc script before relying on this.
    """
    with open(_lowerCamelCase , encoding="""utf-8""" ) as f:
        __a : List[Any] = yaml.safe_load(f.read() )
    # Get to the API doc
    __a : str = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    __a : Any = content[api_idx]["""sections"""]
    # Then to the model doc
    __a : Tuple = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    __a : List[str] = api_doc[scheduler_idx]["""sections"""]
    __a : Tuple = clean_doc_toc(_lowerCamelCase )
    __a : Dict = False
    if new_scheduler_doc != scheduler_doc:
        __a : Dict = True
        if overwrite:
            __a : Optional[Any] = new_scheduler_doc
    if diff:
        if overwrite:
            __a : Optional[Any] = api_doc
            with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
def __magic_name__ ( _lowerCamelCase : int=False ):
    """Check (and with ``_lowerCamelCase=True`` rewrite) the Pipelines section
    of the docs table of content, including per-pipeline sub-sections.

    NOTE(review): same obfuscation breakage as the scheduler check above —
    ``open(_lowerCamelCase, ...)`` opens the overwrite flag, ``clean_doc_toc``
    is undefined in this module, and ``__a`` bindings shadow the names read
    later (``content``/``api_doc``/``pipeline_docs``/...).  Confirm upstream.
    """
    with open(_lowerCamelCase , encoding="""utf-8""" ) as f:
        __a : Optional[Any] = yaml.safe_load(f.read() )
    # Get to the API doc
    __a : Optional[Any] = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    __a : Union[str, Any] = content[api_idx]["""sections"""]
    # Then to the model doc
    __a : int = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1
    __a : Tuple = False
    __a : Any = api_doc[pipeline_idx]["""sections"""]
    __a : Dict = []
    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            __a : Any = pipeline_doc["""section"""]
            __a : Optional[Any] = clean_doc_toc(_lowerCamelCase )
            if overwrite:
                __a : List[Any] = new_sub_pipeline_doc
        new_pipeline_docs.append(_lowerCamelCase )
    # sort overall pipeline doc
    __a : int = clean_doc_toc(_lowerCamelCase )
    if new_pipeline_docs != pipeline_docs:
        __a : List[Any] = True
        if overwrite:
            __a : Optional[Any] = new_pipeline_docs
    if diff:
        if overwrite:
            __a : Union[str, Any] = api_doc
            with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
                f.write(yaml.dump(_lowerCamelCase , allow_unicode=_lowerCamelCase ) )
        else:
            raise ValueError(
                """The model doc part of the table of content is not properly sorted, run `make style` to fix this.""" )
if __name__ == "__main__":
    # CLI entry point: report (or, with --fix_and_overwrite, repair) the
    # scheduler and pipeline sections of the docs table of content.
    # NOTE(review): the parser/args are bound to ``lowercase__`` but read as
    # ``parser``/``args``, and ``check_scheduler_doc``/``check_pipeline_doc``
    # are undefined in this module (both functions above are named
    # ``__magic_name__``) — obfuscation artifacts, confirm upstream.
    lowercase__ = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    lowercase__ = parser.parse_args()
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 63 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# NOTE(review): this set is consumed as ``_TO_SKIP`` inside the test class
# below, but is bound here to ``lowercase__`` — confirm the intended name.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # Pipeline tests for the ``text-classification`` task (PT and TF tiny
    # models plus slow full-model checks).
    # NOTE(review): obfuscation artifacts throughout — the two mapping
    # attributes below share the name ``_lowerCAmelCase`` (second shadows the
    # first), the class-body guards read undefined names ``model_mapping``/
    # ``tf_model_mapping``/``_TO_SKIP``, and every method is named
    # ``lowerCAmelCase__`` so only the last definition survives on the class.
    # Confirm the intended identifiers against the upstream test module.
    _lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    _lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        _lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        _lowerCAmelCase = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    @require_torch
    def lowerCAmelCase__(self ):
        '''Tiny PT model: top_k, batched inputs and the legacy return_all_scores flag.'''
        __a : int = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
        __a : Tuple = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
        __a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __a : List[str] = text_classifier("""This is great !""" , top_k=1 )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        # Legacy behavior
        __a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
        __a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
        __a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
                [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
            ] , )
        __a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [
                {"""label""": """LABEL_0""", """score""": 0.504},
                {"""label""": """LABEL_0""", """score""": 0.504},
            ] , )
    @require_torch
    def lowerCAmelCase__(self ):
        '''Tiny PT model pinned to CPU still produces the expected label/score.'''
        import torch
        __a : Any = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
        __a : Optional[int] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
    @require_tf
    def lowerCAmelCase__(self ):
        '''Tiny TF model produces the expected label/score.'''
        __a : List[Any] = pipeline(
            task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
        __a : List[str] = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
    @slow
    @require_torch
    def lowerCAmelCase__(self ):
        '''Default full PT model: sentiment predictions on real sentences.'''
        __a : Tuple = pipeline("""text-classification""" )
        __a : Tuple = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __a : Optional[int] = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __a : Union[str, Any] = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    @slow
    @require_tf
    def lowerCAmelCase__(self ):
        '''Default full TF model: sentiment predictions on real sentences.'''
        __a : List[str] = pipeline("""text-classification""" , framework="""tf""" )
        __a : str = text_classifier("""This is great !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
        __a : Tuple = text_classifier("""This is bad !""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
        __a : str = text_classifier("""Birds are a type of animal""" )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """POSITIVE""", """score""": 0.988}] )
    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        '''Build a pipeline from the given model/tokenizer plus sample inputs.'''
        __a : Dict = TextClassificationPipeline(model=_lowercase , tokenizer=_lowercase )
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        '''Generic run: single/batched inputs, top_k=None and text-pair handling.'''
        __a : List[str] = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        __a : Union[str, Any] = """HuggingFace is in"""
        __a : List[str] = text_classifier(_lowercase )
        self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        __a : Optional[int] = ["""HuggingFace is in """, """Paris is in France"""]
        __a : Dict = text_classifier(_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}, {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
        self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        __a : Dict = text_classifier(_lowercase , top_k=_lowercase )
        __a : Dict = len(model.config.idalabel.values() )
        self.assertEqual(
            nested_simplify(_lowercase ) , [[{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N, [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] * N] , )
        __a : Dict = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
        __a : Any = text_classifier(_lowercase )
        self.assertEqual(
            nested_simplify(_lowercase ) , {"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )} , )
        self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        __a : Dict = [["""HuggingFace is in """, """Paris is in France"""]]
        with self.assertRaises(_lowercase ):
            text_classifier(_lowercase )
        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        __a : Optional[int] = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
        self.assertEqual(
            nested_simplify(_lowercase ) , [{"""label""": ANY(_lowercase ), """score""": ANY(_lowercase )}] , )
        self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
| 63 | 1 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowercase__ = "sshleifer/bart-tiny-random"
lowercase__ = "patrickvonplaten/t5-tiny-random"
@require_torch
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for `create_student_by_copying_alternating_layers`.

    NOTE(review): the two tiny-checkpoint constants at the top of this file
    were collapsed into a single name by a bad rename, so the checkpoint ids
    are inlined below; method names are restored from the upstream test suite.
    """

    @cached_property
    def teacher_config(self):
        # Teacher used by the encoder/decoder-layer comparisons is the tiny BART.
        return AutoConfig.from_pretrained("sshleifer/bart-tiny-random")

    def test_valid_t5(self):
        # T5 exposes depth as `num_hidden_layers`.
        student, *_ = create_student_by_copying_alternating_layers(
            "patrickvonplaten/t5-tiny-random", tempfile.mkdtemp(), e=1, d=1
        )
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        # Only checks that an asymmetric student (decoder depth defaulted) builds.
        student, *_ = create_student_by_copying_alternating_layers(
            "patrickvonplaten/t5-tiny-random", tempfile.mkdtemp(), e=1, d=None
        )

    def test_same_decoder_small_encoder(self):
        # d=None keeps the teacher's full decoder depth.
        student, *_ = create_student_by_copying_alternating_layers(
            "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=None
        )
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(
            "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=1, d=1
        )
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        # Omitting both e and d is invalid input and must raise.
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(
                "sshleifer/bart-tiny-random", tempfile.mkdtemp(), e=None, d=None
            )
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Unit tests for the `knapsack` implementation.

    NOTE(review): all three methods were renamed to the same identifier by a
    bad rename (so only one test ever ran) and the call arguments were
    replaced by the undefined name `_lowercase`; names restored below.
    """

    def test_base_case(self):
        """Zero capacity / zero-value items must yield 0 profit."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance: optimal is items worth 2 and 3 (total weight 3)."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic textbook instance: optimal profit is 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 63 | 1 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    "files",
    [
        ["full:README.md", "dataset_infos.json"],
        ["empty:README.md", "dataset_infos.json"],
        ["dataset_infos.json"],
        ["full:README.md"],
    ],
)
def test_from_dir(files, tmp_path_factory):
    """DatasetInfosDict loads from README.md YAML and/or dataset_infos.json."""
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write("{\"default\": {\"dataset_size\": 42}}")
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    """A DatasetInfo round-trips through write_to_directory/from_directory."""
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    """The YAML dict contains exactly the whitelisted fields and survives a YAML round-trip."""
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        # only simple YAML-serializable types should appear
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """An empty DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    """A DatasetInfosDict round-trips through the README.md YAML header."""
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)

    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded

    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 63 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # NOTE(review): this file is rename-corrupted. The base class `__snake_case`
    # is undefined here (presumably manim's `Scene` — TODO confirm). The name
    # `_lowercase` is passed as an argument throughout but is never defined;
    # each occurrence replaced a distinct original value (a mobject, a direction
    # constant such as RIGHT/DOWN/LEFT, a color, a boolean). Likewise every
    # local was collapsed to `__a`, so the later reads of `mem`, `meta_mem`,
    # `fill`, `cpu`, `gpu`, `model`, `checkpoint`, `disk`, `key`, `key_text`,
    # `step_a`, `target`, `cpu_target`, `model_cpu_arr`, `ckpt_arr`,
    # `ckpt_cpu_arr`, `animations`, `cpu_left_col_base`, `cpu_right_col_base`,
    # and `disk_left_col_base` all reference unbound names. Kept byte-identical;
    # restoring intent requires the original animation source.
    def lowerCAmelCase__(self ):
        '''Build the CPU/GPU/Model/Checkpoint layout, then animate the loaded
        checkpoint being moved to disk and garbage-collected from memory.'''
        # Building blocks: a memory cell, a smaller "meta" cell, and a fill square.
        __a : List[str] = Rectangle(height=0.5 , width=0.5 )
        __a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
        __a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six memory cells with a label.
        __a : Dict = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Union[str, Any] = Text("""CPU""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_lowercase )
        # GPU: a single row of four memory cells with a label.
        __a : Optional[Any] = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = Text("""GPU""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(_lowercase )
        # Model: a row of six memory cells with a label.
        __a : List[Any] = [mem.copy() for i in range(6 )]
        __a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Optional[Any] = Text("""Model""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(_lowercase )
        # Place a small filled target next to each model cell, mapping the
        # model's (empty) weights onto CPU locations.
        __a : Tuple = []
        __a : Tuple = []
        __a : Optional[int] = []
        for i, rect in enumerate(_lowercase ):
            rect.set_stroke(_lowercase )
            __a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
            self.add(_lowercase )
            model_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase , *_lowercase )
        # Loaded checkpoint: six cells; each filled cell is mirrored onto a
        # CPU column slot (left column first, then right).
        __a : Optional[Any] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
        __a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(_lowercase )
        __a : Dict = []
        __a : int = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
            target.move_to(_lowercase )
            ckpt_arr.append(_lowercase )
            __a : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase )
        # Legend explaining the empty-model vs checkpoint colors.
        __a : List[str] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a : List[Any] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_lowercase , _lowercase )
        __a : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_lowercase )
        # Step 1 caption, plus the Disk group (two columns of six meta cells).
        __a : Optional[int] = MarkupText(
            F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        __a : List[Any] = [meta_mem.copy() for i in range(6 )]
        __a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Dict = Text("""Disk""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
        # Animate each checkpoint cell shrinking onto its disk slot.
        __a : Optional[Any] = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
        self.play(*_lowercase )
        self.play(FadeOut(_lowercase ) )
        # Step 2 caption, then fade everything checkpoint-related out.
        __a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_lowercase , run_time=3 ) )
        self.play(
            FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
        self.wait()
| 63 | 1 |
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series.

    Uses the closed form S = n/2 * (2a + (n - 1)d).

    >>> sum_of_series(1, 1, 10)
    55.0
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total
def main():
    """Demo: print the sum of the series 1 + 2 + ... + 10."""
    print(sum_of_series(1, 1, 10))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63 |
"""simple docstring"""
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Convert molarity to normality: N = (moles / volume) * n-factor, rounded."""
    return round(float(moles / volume) * nfactor)
def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal-gas pressure P = nRT / V (R = 0.0821 L·atm/(mol·K)), rounded."""
    return round(float((moles * 0.08_21 * temperature) / (volume)))
def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal-gas volume V = nRT / P (R = 0.0821 L·atm/(mol·K)), rounded."""
    return round(float((moles * 0.08_21 * temperature) / (pressure)))
def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal-gas temperature T = PV / (nR) (R = 0.0821 L·atm/(mol·K)), rounded."""
    return round(float((pressure * volume) / (0.08_21 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for the `text-classification` pipeline.

    NOTE(review): method/attribute names were destroyed by a bad rename (all
    methods shared one name, several signatures repeated the same parameter
    name — a SyntaxError — and `id2label` was mangled to `idalabel`). Names
    below are restored from the upstream transformers test suite.
    """

    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # LayoutLM-style configs require image inputs, so drop them from the mappings.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 63 |
"""simple docstring"""
def average_absolute_deviation(nums: list[int]) -> float:
    """Return the average absolute deviation of `nums` from their mean.

    Raises:
        ValueError: if `nums` is empty.

    >>> average_absolute_deviation([0])
    0.0
    >>> average_absolute_deviation([4, 1, 3, 2])
    1.0
    """
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63 | 1 |
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class SCREAMING_SNAKE_CASE__(SchedulerMixin, ConfigMixin):
    """Improved Pseudo Numerical Methods for Diffusion Models (iPNDM) scheduler.

    NOTE(review): the base classes, method names, attribute names and the
    `torch.atan2` call were destroyed by a bad rename; restored here from
    diffusers' `IPNDMScheduler`, whose structure the surviving code matches.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, trained_betas=None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device=None):
        """Set the discrete timesteps used for the diffusion chain."""
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        # `torch.atana` was the digit-mangled `torch.atan2`.
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(self, model_output, timestep, sample, return_dict=True):
        """Propagate the sample one step backwards, combining up to the last
        four noise predictions with Adams-Bashforth-style coefficients."""
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample, *args, **kwargs):
        """No input scaling is needed for this scheduler; return unchanged."""
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        # Reconstruct the clean prediction from the current sample and the
        # combined noise estimate, then step to the previous timestep.
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
| 63 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply the 1-D Gaussian density elementwise to `img` (given the variance)."""
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the kernel_size x kernel_size window of `img` centered at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Create a kernel_size x kernel_size spatial Gaussian kernel.

    Each cell first holds its Euclidean distance from the kernel center, then
    the Gaussian of that distance is taken elementwise.
    """
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Apply a bilateral filter (spatial x intensity Gaussian weights) to `img`.

    Border pixels (within kernel_size // 2 of the edge) are left at zero.
    """
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # intensity differences relative to the window's center pixel
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    """Parse argv-style args: [prog, filename, spatial_var, intensity_var, kernel].

    Missing arguments fall back to defaults; an even kernel size is bumped to
    the next odd number so the kernel has a center pixel.
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    # `np.uinta` was the digit-mangled `np.uint8` (8-bit image dtype).
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 1 |
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
lowercase__ = "scheduler_config.json"
class SCREAMING_SNAKE_CASE__(Enum):
    """Enumeration of the Karras-style Flax diffusion schedulers.

    NOTE(review): the member names were all collapsed to `_lowerCAmelCase` by a
    bad rename; the names below follow diffusers'
    `FlaxKarrasDiffusionSchedulers` — confirm against upstream. The undefined
    base `__snake_case` was restored to the imported `Enum`.
    """

    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class SCREAMING_SNAKE_CASE__(BaseOutput):
    """Output of a Flax scheduler step.

    NOTE(review): the single field annotation was destroyed by a bad rename
    (`_lowerCAmelCase = 42`); `prev_sample` follows diffusers'
    `FlaxSchedulerOutput` — confirm against upstream. The undefined base
    `__snake_case` was restored to the imported `BaseOutput`.
    """

    prev_sample: jnp.ndarray
class SCREAMING_SNAKE_CASE__:
    """Base mixin for Flax schedulers: config-driven load/save plus
    compatibility lookup (original name: FlaxSchedulerMixin).

    Relies on `load_config`/`from_config`/`save_config` provided by the
    config-mixin the concrete scheduler also inherits from.
    """

    config_name = lowercase__  # the module's "scheduler_config.json" constant
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state, if any) from a saved config."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        # NOTE(review): `state` is unbound if the scheduler defines no
        # create_state/has_state — pre-existing upstream behavior, kept as-is.
        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write the scheduler configuration to `save_directory`."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes that can be swapped in for this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level diffusers package.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Broadcast `x` to `shape`, aligning x's axes on the LEFT (numpy aligns right).

    Trailing singleton axes are appended to `x` before broadcasting, so e.g. a
    (B,) array broadcasts against a (B, H, W) shape.
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int=0.9_99 , _lowerCamelCase : int=jnp.floataa ):
def alpha_bar(_lowerCamelCase : List[Any] ):
return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
__a : str = []
for i in range(_lowerCamelCase ):
__a : List[str] = i / num_diffusion_timesteps
__a : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(_lowerCamelCase ) / alpha_bar(_lowerCamelCase ) , _lowerCamelCase ) )
return jnp.array(_lowerCamelCase , dtype=_lowerCamelCase )
@flax.struct.dataclass
class CommonSchedulerState:
    """Immutable container of the beta/alpha schedules shared by Flax schedulers.

    NOTE(review): the class and field names were destroyed by a bad rename;
    `CommonSchedulerState` is required by the annotations of the helper
    functions below, and the fields follow diffusers' implementation.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the state from a scheduler's config (beta schedule selection)."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) for `timesteps`,
    left-broadcast to the shape of `original_samples`."""
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    """Forward-diffuse: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def __magic_name__(state: "CommonSchedulerState", sample: "jnp.ndarray", noise: "jnp.ndarray", timesteps: "jnp.ndarray"):
    """v-prediction target: v = sqrt(abar_t) * eps - sqrt(1 - abar_t) * x.

    Fix: the mangled version declared all four parameters with the same name
    (SyntaxError); `sample`/`noise` are restored from the body's own references.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 63 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __magic_name__():
    """Build a three-row toy dataset for the MinHash-deduplication tests.

    Rows 1 and 2 are near-duplicates ("a " repeated 20 / 30 times); row 3 is
    distinct, so a 0.85 Jaccard threshold clusters exactly the first two.

    Fix: the mangled version called ``Dataset.from_dict(_lowerCamelCase)`` with
    an undefined name; the dict built just above is passed instead.
    """
    data = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 2_0, "a " * 3_0, "b " * 7],
    }
    dataset = Dataset.from_dict(data)
    return dataset
class SCREAMING_SNAKE_CASE__(TestCase):
    """Tests for MinHash-based dataset deduplication.

    Fixes over the mangled original: the base class was the undefined name
    `__snake_case` (the module imports ``TestCase`` for exactly this purpose),
    and the method bodies passed the undefined name `_lowercase` where the
    freshly-built dataset / expected value belonged.

    NOTE(review): both methods still carry the mangled name ``lowerCAmelCase__``
    so the second definition shadows the first at class-creation time; the
    original ``test_*`` names would need restoring for both to run.
    """

    def lowerCAmelCase__(self):
        """make_duplicate_clusters groups the two near-duplicate rows together."""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def lowerCAmelCase__(self):
        """deduplicate_dataset drops one duplicate and annotates the cluster."""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(ds_filter)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        # "is_extreme" marks the cluster member kept as the canonical copy.
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int = 1_0_0 ):
__a : Union[str, Any] = 0
__a : Union[str, Any] = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the function above
    # is named `__magic_name__`), so running this as a script raises NameError —
    # confirm the intended entry-point name against the pristine source.
    print(f'{solution() = }')
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Map each submodule to the public names it exposes; `_LazyModule` uses this to
# defer the actual (torch-heavy) imports until an attribute is first accessed.
# Fixes over the mangled original: the dict and the modeling list were both
# bound to the same throwaway name (the second assignment clobbered the first),
# the `_import_structure` referenced at the bottom was never defined, and the
# lazy module was assigned to a local instead of installed via `sys.modules`
# (the `import sys` in the else-branch only makes sense for that purpose).
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class SCREAMING_SNAKE_CASE__:
    """A binary-tree node: an integer payload plus optional left/right children.

    Fix: the mangled original bound all three fields to the same class-level
    name, leaving the dataclass with no usable fields (so ``Node(1)`` failed and
    ``.data``/``.left``/``.right`` did not exist). Field names are restored from
    how the traversal helpers in this module access nodes.
    """

    data: int
    left: "SCREAMING_SNAKE_CASE__ | None" = None
    right: "SCREAMING_SNAKE_CASE__ | None" = None
def __magic_name__():
    """Build the sample tree used by the demo driver:

            1
           / \
          2   3
         / \
        4   5

    Fixes over the mangled original: the five nodes were created but never
    linked (four locals were unused), and construction went through the
    undefined name ``Node`` instead of the dataclass actually defined in this
    module. NOTE(review): the 1-(2-(4,5),3) shape is inferred from the five
    payloads and the demo traversals — confirm against the pristine source.
    """
    tree = SCREAMING_SNAKE_CASE__(1)
    tree.left = SCREAMING_SNAKE_CASE__(2)
    tree.right = SCREAMING_SNAKE_CASE__(3)
    tree.left.left = SCREAMING_SNAKE_CASE__(4)
    tree.left.right = SCREAMING_SNAKE_CASE__(5)
    return tree
def __magic_name__ ( _lowerCamelCase : Node | None ):
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def __magic_name__ ( _lowerCamelCase : Node | None ):
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def __magic_name__ ( _lowerCamelCase : Node | None ):
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def __magic_name__ ( _lowerCamelCase : Node | None ):
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def __magic_name__ ( _lowerCamelCase : Node | None ):
__a : list[Any] = []
if root is None:
return output
__a : Tuple = deque([root] )
while process_queue:
__a : Optional[Any] = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def __magic_name__ ( _lowerCamelCase : Node | None , _lowerCamelCase : int ):
__a : list[Any] = []
def populate_output(_lowerCamelCase : Node | None , _lowerCamelCase : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(_lowerCamelCase , _lowerCamelCase )
return output
def __magic_name__ ( _lowerCamelCase : Node | None , _lowerCamelCase : int ):
__a : list[Any] = []
def populate_output(_lowerCamelCase : Node | None , _lowerCamelCase : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(_lowerCamelCase , _lowerCamelCase )
return output
def __magic_name__ ( _lowerCamelCase : Node | None ):
if root is None:
return []
__a : list[Sequence[Node | None]] = []
__a : List[str] = 0
__a : Optional[Any] = height(_lowerCamelCase )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(_lowerCamelCase , _lowerCamelCase ) )
__a : Any = 1
else:
output.append(get_nodes_from_right_to_left(_lowerCamelCase , _lowerCamelCase ) )
__a : Optional[Any] = 0
return output
def __magic_name__ ( ): # Main function for testing.
    """Demo driver: build the sample tree and print every traversal.

    NOTE(review): every function in this module is (re)defined under the name
    `__magic_name__`, so the names called below (make_tree, inorder, preorder,
    postorder, height, level_order, get_nodes_from_left_to_right, zigzag — and
    `main` in the guard) are all undefined here and running this raises
    NameError; confirm the intended public names against the pristine source.
    """
    __a : Union[str, Any] = make_tree()
    print(F'''In-order Traversal: {inorder(_lowerCamelCase )}''' )
    print(F'''Pre-order Traversal: {preorder(_lowerCamelCase )}''' )
    print(F'''Post-order Traversal: {postorder(_lowerCamelCase )}''' , """\n""" )
    print(F'''Height of Tree: {height(_lowerCamelCase )}''' , """\n""" )
    print("""Complete Level Order Traversal: """ )
    print(level_order(_lowerCamelCase ) , """\n""" )
    print("""Level-wise order Traversal: """ )
    for level in range(1 , height(_lowerCamelCase ) + 1 ):
        print(F'''Level {level}:''' , get_nodes_from_left_to_right(_lowerCamelCase , level=_lowerCamelCase ) )
    print("""\nZigZag order Traversal: """ )
    print(zigzag(_lowerCamelCase ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 63 |
"""simple docstring"""
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
lowercase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__(Enum):
    """Names of the supported learning-rate schedules.

    `get_scheduler` accepts either a member or its string value and dispatches
    through the TYPE_TO_SCHEDULER_FUNCTION table below.

    Fixes over the mangled original: the base class was the undefined name
    `__snake_case` (this module imports ``Enum`` for exactly this purpose), and
    every member was bound to the same placeholder name so only the last value
    survived; member names are restored from the dispatch table's own usage
    (SchedulerType.LINEAR, .COSINE, ...).
    """

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int = -1 ):
return LambdaLR(_lowerCamelCase , lambda _lowerCamelCase : 1 , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1.0 , _lowerCamelCase ) )
return 1.0
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : str , _lowerCamelCase : int = -1 ):
__a : Optional[int] = {}
__a : Any = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__a , __a : int = rule_str.split(""":""" )
__a : Optional[int] = int(_lowerCamelCase )
__a : str = float(_lowerCamelCase )
__a : int = value
__a : Dict = float(rule_list[-1] )
def create_rules_function(_lowerCamelCase : str , _lowerCamelCase : Tuple ):
def rule_func(_lowerCamelCase : int ) -> float:
__a : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_lowerCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__a : Optional[int] = create_rules_function(_lowerCamelCase , _lowerCamelCase )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , last_epoch=_lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : str=-1 ):
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float = 0.5 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Any ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_lowerCamelCase ) * 2.0 * progress )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optimizer , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 ):
def lr_lambda(_lowerCamelCase : Optional[int] ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
__a : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_lowerCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]=1E-7 , _lowerCamelCase : Optional[int]=1.0 , _lowerCamelCase : Optional[int]=-1 ):
__a : Union[str, Any] = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(F'''lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})''' )
def lr_lambda(_lowerCamelCase : int ):
if current_step < num_warmup_steps:
return float(_lowerCamelCase ) / float(max(1 , _lowerCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__a : Tuple = lr_init - lr_end
__a : int = num_training_steps - num_warmup_steps
__a : Optional[int] = 1 - (current_step - num_warmup_steps) / decay_steps
__a : List[str] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Dispatch table for `get_scheduler`: SchedulerType member -> factory function.
# NOTE(review): as mangled here, neither `SchedulerType` nor any of the
# `get_*_schedule*` names on the right-hand side exists in this module (the enum
# above is named SCREAMING_SNAKE_CASE__ and every factory __magic_name__), so
# evaluating this dict raises NameError at import time — confirm the intended
# identifiers against the pristine source.
lowercase__ = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__(
    name: "Union[str, SchedulerType]",
    optimizer: Optimizer,
    step_rules: "Optional[str]" = None,
    num_warmup_steps: "Optional[int]" = None,
    num_training_steps: "Optional[int]" = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified scheduler factory: resolve *name* to a SchedulerType and delegate
    to the matching factory, validating which arguments that schedule needs.

    Raises ValueError when a warmup/training-step count required by the chosen
    schedule is missing.

    Fix: the mangled original declared all eight parameters with the same name
    (SyntaxError); names are restored from the keyword arguments this function
    itself forwards to the factories.
    """
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
| 63 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Map each submodule to the public names it exposes; `_LazyModule` uses this to
# defer the actual (torch-heavy) imports until an attribute is first accessed.
# Fixes over the mangled original: the dict and the modeling list were bound to
# the same throwaway name (the second assignment clobbered the first), the
# `_import_structure` referenced at the bottom was never defined, and the lazy
# module was assigned to a local instead of installed via `sys.modules` (the
# `import sys` in the else-branch only makes sense for that purpose).
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xmod import (
            XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
            XmodForCausalLM,
            XmodForMaskedLM,
            XmodForMultipleChoice,
            XmodForQuestionAnswering,
            XmodForSequenceClassification,
            XmodForTokenClassification,
            XmodModel,
            XmodPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 |
"""simple docstring"""
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __magic_name__(config_path, display=False):
    """Load an OmegaConf YAML config; optionally pretty-print it.

    Fix: the mangled original declared both parameters with the same name
    (SyntaxError) and returned the undefined name ``config``; names are
    restored from the body's own references.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def __magic_name__(device, conf_path=None, ckpt_path=None):
    """Build a VQModel from a YAML config, load its checkpoint onto *device*,
    and return the model.

    Fix: the mangled original declared all parameters with the same name
    (SyntaxError); `conf_path`/`ckpt_path` are restored from the body's own
    references, and the first positional is the target device (used by
    ``torch.load(..., map_location=...)`` and ``model.to(...)``).
    """
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        # Lightning checkpoints nest the weights under "state_dict".
        sd = sd["state_dict"]
    # NOTE(review): the strictness flag was lost in the mangling; strict=True is
    # load_state_dict's default — confirm against the pristine source.
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def __magic_name__(x, model):
    """Round-trip *x* through the VQGAN: encode to the latent grid, report its
    spatial shape, and decode back to image space.

    Fix: the mangled original declared both parameters with the same name
    (SyntaxError); names are restored from the print statement's references.
    """
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def __magic_name__ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=False ):
__a , __a : Optional[Any] = string.rsplit(""".""" , 1 )
if reload:
__a : Optional[Any] = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def __magic_name__(config: "Any"):
    """Instantiate the object described by *config*: the dotted class path lives
    under "target" and constructor kwargs under the optional "params" mapping.

    Fix: the mangled original's parameter was never used — the body read the
    undefined name ``config``; the parameter name is restored to match.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def __magic_name__(config, sd, gpu=True, eval_mode=True):
    """Instantiate the model described by *config*, optionally load state dict
    *sd*, move to GPU, and switch to eval mode; returns ``{"model": model}``.

    Fix: the mangled original declared all parameters with the same name
    (SyntaxError) and loaded the undefined name `_lowercase`; names are
    restored from the body's own references (``sd``, ``gpu``, ``eval_mode``).
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def __magic_name__(config, ckpt, gpu, eval_mode):
    """Load a (possibly absent) Lightning checkpoint and build the model.

    Returns ``(model, global_step)``; *global_step* is None when no checkpoint
    path is given.

    Fix: the mangled original declared all parameters with the same name
    (SyntaxError); names are restored from the body's own references
    (``ckpt``, ``config.model``, the gpu/eval_mode keywords).
    """
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
| 63 | 1 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __magic_name__ ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[str] ):
__a : Dict = k_size // 2
__a , __a : str = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
__a : Optional[Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] ):
__a , __a : Optional[Any] = image.shape[0], image.shape[1]
# dst image height and width
__a : int = height - k_size + 1
__a : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
__a : Any = zeros((dst_height * dst_width, k_size * k_size) )
__a : List[str] = 0
for i, j in product(range(_lowerCamelCase ) , range(_lowerCamelCase ) ):
__a : Optional[int] = ravel(image[i : i + k_size, j : j + k_size] )
__a : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
__a : List[Any] = gen_gaussian_kernel(_lowerCamelCase , _lowerCamelCase )
__a : List[str] = ravel(_lowerCamelCase )
# reshape and get the dst image
__a : Optional[Any] = dot(_lowerCamelCase , _lowerCamelCase ).reshape(_lowerCamelCase , _lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
    # NOTE(review): as mangled, every variable below is bound to the single name
    # `lowercase__` and the functions are named `__magic_name__`, so the reads of
    # `img`, `gray`, `gaussian_filter` and `gaussianaxa` are all undefined and
    # this script raises NameError; `cva` is OpenCV (cv2) under a mangled alias.
    # read original image
    lowercase__ = imread(R"../image_data/lena.jpg")
    # turn image in gray scale value
    lowercase__ = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    lowercase__ = gaussian_filter(gray, 3, sigma=1)
    lowercase__ = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow("gaussian filter with 3x3 mask", gaussianaxa)
    imshow("gaussian filter with 5x5 mask", gaussianaxa)
    waitKey()
| 63 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Map each submodule to the public names it exposes; `_LazyModule` defers the
# heavy imports until first attribute access. Fixes over the mangled original:
# the dict and the three optional lists were bound to the same throwaway name
# (each assignment clobbering the last), the `_import_structure` referenced at
# the bottom was never defined, and the lazy module was assigned to a local
# instead of installed via `sys.modules`.
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 63 | 1 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Optional torch dependency; the resulting flag is later used to extend the
# download user-agent string with the torch version.
try:
    import torch
    lowercase__ = True
except ImportError:
    lowercase__ = False
# NOTE(review): every constant below is bound to the same mangled name
# `lowercase__`, each assignment clobbering the previous one, while later code
# reads the original names (torch_cache_home, PATH, TRANSFORMERS_CACHE, OBJECTS,
# ATTRIBUTES, WEIGHTS_NAME, ...), which are therefore undefined — restore the
# real constant names from the pristine source.
try:
    from torch.hub import _get_torch_home
    lowercase__ = _get_torch_home()
except ImportError:
    # Fall back to $TORCH_HOME, then $XDG_CACHE_HOME/torch, then ~/.cache/torch.
    lowercase__ = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
lowercase__ = os.path.join(torch_cache_home, "transformers")
lowercase__ = "https://cdn.huggingface.co"
lowercase__ = "https://s3.amazonaws.com/models.huggingface.co/bert"
# Directory containing this file; config/label files are resolved next to it.
lowercase__ = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
lowercase__ = os.path.join(PATH, "config.yaml")
lowercase__ = os.path.join(PATH, "attributes.txt")
lowercase__ = os.path.join(PATH, "objects.txt")
# Cache dir resolution: each env var overrides the previous default.
lowercase__ = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
lowercase__ = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
lowercase__ = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
lowercase__ = "pytorch_model.bin"
lowercase__ = "config.yaml"
def __magic_name__ ( _lowerCamelCase : Dict=OBJECTS , _lowerCamelCase : Tuple=ATTRIBUTES ):
__a : List[Any] = []
with open(_lowerCamelCase ) as f:
for object in f.readlines():
vg_classes.append(object.split(""",""" )[0].lower().strip() )
__a : Tuple = []
with open(_lowerCamelCase ) as f:
for object in f.readlines():
vg_attrs.append(object.split(""",""" )[0].lower().strip() )
return vg_classes, vg_attrs
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
__a : Union[str, Any] = OrderedDict()
with open(_lowerCamelCase , """rb""" ) as f:
__a : Union[str, Any] = pkl.load(_lowerCamelCase )["""model"""]
for k in copy.deepcopy(list(ckp.keys() ) ):
__a : Dict = ckp.pop(_lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
__a : List[str] = torch.tensor(_lowerCamelCase )
else:
assert isinstance(_lowerCamelCase , torch.tensor ), type(_lowerCamelCase )
__a : str = v
return r
class SCREAMING_SNAKE_CASE__ :
    """Attribute-style nested configuration container: plain dicts become nested
    Config children, and values are reachable both as attributes and through the
    internal pointer table.

    NOTE(review): this block appears machine-mangled — several methods declare
    two parameters with the same name `_lowercase` (a SyntaxError in Python),
    all serialisation helpers share the placeholder name `lowerCAmelCase__` (so
    later defs shadow earlier ones), and many statements assign to the throwaway
    name `__a` while later lines read the original variable names (`name`,
    `level`, `d`, `levels`, `data`, `r`, ...). The docstrings below describe the
    apparent intent; confirm every detail against the pristine upstream file.
    """

    # Class-level default pointer table (presumably shadowed per instance).
    _lowerCAmelCase = {}

    def __init__(self , _lowercase , _lowercase = "root" , _lowercase=0 ):
        """Recursively wrap a dictionary: nested dicts become child Configs one
        level deeper; None values are rejected."""
        __a : Dict = name
        __a : Union[str, Any] = level
        __a : List[str] = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            __a : Dict = copy.deepcopy(_lowercase )
            __a : Tuple = copy.deepcopy(_lowercase )
            if isinstance(_lowercase , _lowercase ):
                # presumably: wrap nested dict values as child Config objects
                __a : int = Config(_lowercase , name=_lowercase , level=level + 1 )
            __a : Dict = v
            setattr(self , _lowercase , _lowercase )
        __a : str = d

    def __repr__(self ):
        """Show the top-level keys of the pointer table."""
        return str(list((self._pointer.keys()) ) )

    def __setattr__(self , _lowercase , _lowercase ):
        """Set an attribute; dotted keys apparently descend into nested Config
        children and update the pointer table along the way."""
        __a : Optional[Any] = val
        __a : Union[str, Any] = val
        __a : List[Any] = key.split(""".""" )
        __a : List[str] = len(_lowercase ) - 1
        __a : int = self._pointer
        if len(_lowercase ) > 1:
            for i, l in enumerate(_lowercase ):
                if hasattr(self , _lowercase ) and isinstance(getattr(self , _lowercase ) , _lowercase ):
                    setattr(getattr(self , _lowercase ) , """.""".join(levels[i:] ) , _lowercase )
                if l == last_level:
                    __a : Optional[Any] = val
                else:
                    __a : List[str] = pointer[l]

    def lowerCAmelCase__(self ):
        """Return the underlying dict of wrapped values."""
        return self._pointer

    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        """Serialize this config to YAML at *file_name*."""
        with open(F'''{file_name}''' , """w""" ) as stream:
            dump(_lowercase , _lowercase )

    def lowerCAmelCase__(self , _lowercase , _lowercase ):
        """Serialize this config to JSON at *file_name*."""
        with open(F'''{file_name}''' , """w""" ) as stream:
            json.dump(_lowercase , _lowercase )

    @staticmethod
    def lowerCAmelCase__(_lowercase ):
        """Parse a YAML file and return the raw data structure."""
        with open(_lowercase ) as stream:
            __a : Optional[Any] = load(_lowercase , Loader=_lowercase )
        return data

    def __str__(self ):
        """Pretty-print the tree: one line per key, indented by nesting level."""
        __a : Tuple = """ """
        if self._name != "root":
            __a : int = F'''{t * (self._level-1)}{self._name}:\n'''
        else:
            __a : Optional[Any] = """"""
        __a : str = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(_lowercase , _lowercase ):
                r += F'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += F'''{t * (self._level)}{k}: {v} ({type(_lowercase ).__name__})\n'''
        __a : Tuple = level
        # drop the trailing newline
        return r[:-1]

    @classmethod
    def lowerCAmelCase__(cls , _lowercase , **_lowercase ):
        """Alternate constructor: build a Config from a pretrained identifier
        (delegates resolution/downloading to get_config_dict)."""
        __a , __a : Any = cls.get_config_dict(_lowercase , **_lowercase )
        return cls(_lowercase )

    @classmethod
    def lowerCAmelCase__(cls , _lowercase , **_lowercase ):
        """Resolve *pretrained_model_name_or_path* (local dir / file / remote
        URL / hub id), fetch the YAML config via the cache, and return
        ``(config_data, remaining_kwargs)``."""
        __a : Union[str, Any] = kwargs.pop("""cache_dir""" , _lowercase )
        __a : Any = kwargs.pop("""force_download""" , _lowercase )
        __a : str = kwargs.pop("""resume_download""" , _lowercase )
        __a : int = kwargs.pop("""proxies""" , _lowercase )
        __a : str = kwargs.pop("""local_files_only""" , _lowercase )
        if os.path.isdir(_lowercase ):
            __a : str = os.path.join(_lowercase , _lowercase )
        elif os.path.isfile(_lowercase ) or is_remote_url(_lowercase ):
            __a : Dict = pretrained_model_name_or_path
        else:
            __a : List[str] = hf_bucket_url(_lowercase , filename=_lowercase , use_cdn=_lowercase )
        try:
            # Load from URL or cache if already cached
            __a : Optional[Any] = cached_path(
                _lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            __a : Optional[int] = Config.load_yaml(_lowercase )
        except EnvironmentError:
            __a : Dict = """Can't load config for"""
            raise EnvironmentError(_lowercase )
        if resolved_config_file == config_file:
            print("""loading configuration file from path""" )
        else:
            print("""loading configuration file cache""" )
        return Config.load_yaml(_lowercase ), kwargs
def __magic_name__(in_tensor):
    """Compare *in_tensor* against the tensor saved in ``dump.pt``.

    Raises AssertionError with the element-wise mismatch percentage when the
    tensors differ beyond rtol=0.01/atol=0.1, and otherwise raises
    ``Exception("tensors are all good")`` — sic, success is signalled by
    raising, as in the original debugging helper.

    Fix: the mangled original named its parameter `_lowerCamelCase` while the
    body read ``in_tensor``, and both numpy views were assigned to throwaway
    names while the prints/assert read the undefined name ``na``; the two
    distinct arrays are restored.
    """
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def __magic_name__ ( _lowerCamelCase : List[str] ):
__a : List[str] = urlparse(_lowerCamelCase )
return parsed.scheme in ("http", "https")
def __magic_name__(model_id, filename, use_cdn=True):
    """Build the download URL for *filename* of model *model_id* on the
    HuggingFace CDN (default) or the S3 bucket.

    Model ids without a "/" use the legacy flat layout ``{id}-{filename}``;
    namespaced ids use ``{id}/{filename}``.

    Fixes over the mangled original: all parameters shared one name
    (SyntaxError), and *filename* was never interpolated — both URL templates
    ended in the literal text "(unknown)".
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def __magic_name__(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream *url* into the open file object *temp_file* with a tqdm progress bar.

    *resume_size* bytes are assumed to exist locally and are requested as an
    HTTP Range; a 416 response (range not satisfiable) means the file is
    already complete, so the function returns silently. *user_agent* may be a
    string or a dict of extra ``key/value`` tokens.

    Fixes over the mangled original: all parameters shared one name
    (SyntaxError), and the user-agent / Range-header assignment targets had
    been collapsed onto throwaway names.
    """
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 4_1_6:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1_0_2_4):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def __magic_name__(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=1_0,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    """Download *url* into the local cache (or reuse a cached copy) and return
    the path of the cached file, or None when the file cannot be resolved.

    The cache key is derived from the URL and the server ETag; a ``.lock`` file
    serialises concurrent downloads and a ``.json`` sidecar records url/etag
    metadata. With *resume_download*, a ``.incomplete`` file is appended to
    across retries.

    Fix: the mangled original declared all eight parameters with the same name
    (SyntaxError); names and local variables are restored from the body's
    structure and keyword usage.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 2_0_0:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )
            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def __magic_name__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any]=None ):
__a : str = url.encode("""utf-8""" )
__a : str = shaaaa(_lowerCamelCase )
__a : int = url_hash.hexdigest()
if etag:
__a : Tuple = etag.encode("""utf-8""" )
__a : Any = shaaaa(_lowerCamelCase )
filename += "." + etag_hash.hexdigest()
if url.endswith(""".h5""" ):
filename += ".h5"
return filename
def __magic_name__ ( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    """Resolve ``url_or_filename`` to a local path, downloading/caching remote URLs
    and optionally extracting zip/tar archives next to the cache entry.

    Returns the local file path (or the extraction directory when
    ``extract_compressed_file`` is set).  Raises ``EnvironmentError`` for a missing
    local file or an unrecognized archive, ``ValueError`` for an unparsable input.
    (Reconstructed: the original declared every parameter as ``_lowerCamelCase`` —
    a SyntaxError — while the body read the names used below.)
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("""file {} not found""".format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError("""unable to parse {} as a URL or as a local path""".format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace(""".""" , """-""" ) + """-extracted"""
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + """.lock"""
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , """r""" ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError("""Archive format of {} could not be identified""".format(output_path ) )
        return output_path_extracted
    return output_path
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : List[str]="," ):
assert isinstance(_lowerCamelCase , _lowerCamelCase )
if os.path.isfile(_lowerCamelCase ):
with open(_lowerCamelCase ) as f:
__a : Dict = eval(f.read() )
else:
__a : List[Any] = requests.get(_lowerCamelCase )
try:
__a : Tuple = requests.json()
except Exception:
__a : Optional[Any] = req.content.decode()
assert data is not None, "could not connect"
try:
__a : Optional[int] = eval(_lowerCamelCase )
except Exception:
__a : Tuple = data.split("""\n""" )
req.close()
return data
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
__a : Any = requests.get(_lowerCamelCase )
__a : Optional[int] = np.array(Image.open(BytesIO(response.content ) ) )
return img
def __magic_name__ ( _lowerCamelCase : Union[str, Any] ):
__a : Tuple = url.split("""/""" )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(_lowerCamelCase )
with open(_lowerCamelCase , """rb""" ) as stream:
__a : List[Any] = pkl.load(_lowerCamelCase )
__a : Tuple = weights.pop("""model""" )
__a : str = {}
for k, v in model.items():
__a : List[Any] = torch.from_numpy(_lowerCamelCase )
if "running_var" in k:
__a : Union[str, Any] = torch.tensor([0] )
__a : Dict = k.replace("""running_var""" , """num_batches_tracked""" )
__a : Tuple = zero
return new
def __magic_name__ ( ):
    # Print the absolute path of the demo notebook shipped next to this module.
    # NOTE(review): `_lowerCamelCase` is unbound in this zero-argument function,
    # so calling it raises NameError. It presumably should be a path such as this
    # module's directory (e.g. os.path.dirname(__file__)) — confirm against the
    # original source before fixing.
    print(F'''{os.path.abspath(os.path.join(_lowerCamelCase , os.pardir ) )}/demo.ipynb''' )
def __magic_name__ ( im , input_format="RGB" ):
    """Load an image from a local path or URL and return it as a numpy array.

    OpenCV reads images as BGR; the array is converted to RGB and, when
    ``input_format`` is ``"RGB"``, the channel axis is reversed again (back to
    BGR ordering) as the downstream model expects.
    (Reconstructed: original declared two parameters both named
    ``_lowerCamelCase`` — a SyntaxError — and asserted ``isinstance(x, x)``.)
    """
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cva.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, F'''could not connect to: {im}'''
    img = cva.cvtColor(img , cva.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : Optional[Any]=1 ):
return (images[i : i + batch] for i in range(0 , len(_lowerCamelCase ) , _lowerCamelCase ))
| 63 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. Bound to a distinct name as well, because the archive map below
# re-assigns the mangled name `lowercase__` and would otherwise shadow it.
logger = lowercase__ = logging.get_logger(__name__)

# Map from canonical checkpoint name to its hosted config URL.
lowercase__ = {
    "microsoft/unispeech-large-1500h-cv": (
        "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
    ),
    # See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration for a UniSpeech-style speech encoder: transformer sizes,
    convolutional feature-extractor layout, SpecAugment masking, quantizer
    (codevector) settings and CTC loss options.

    NOTE(review): the original ``__init__`` declared every parameter as the
    same mangled name ``_lowercase`` (a SyntaxError) while the body read the
    real names; names and defaults below are restored from those reads.
    """

    _lowerCAmelCase = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv layout lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def lowerCAmelCase__(self ):
        """Product of all conv strides: ratio of raw input length to logits length."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 63 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase__ = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def __magic_name__ ( _lowerCamelCase : int ):
    # Pytest hook (pytest_addoption): register diffusers' shared CLI options
    # (e.g. --make-reports) on the given parser. The import is deferred to call
    # time so collecting this conftest does not require diffusers at import time.
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(_lowerCamelCase )
def __magic_name__ ( terminalreporter ):
    """Pytest hook (pytest_terminal_summary): when --make-reports is set, write
    the detailed test reports via diffusers' shared helper.

    (Fixed: original body read the unbound names ``terminalreporter`` and
    ``make_reports`` — the parameter and the option value were assigned to
    mangled temporaries.)
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 63 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    # Helper that builds small EfficientFormer configs and random pixel inputs
    # for the TF common-model tests.
    # NOTE(review): identifiers were machine-mangled — every __init__ parameter
    # is declared as `_lowercase` (duplicate-argument SyntaxError) while the
    # body reads the real hyper-parameter names; reconstruction of the exact
    # parameter order was not attempted here.
    def __init__(self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.02 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ):
        """Store the test hyper-parameters read by the methods below."""
        __a : str = parent
        __a : List[Any] = batch_size
        __a : int = image_size
        __a : Tuple = patch_size
        __a : str = num_channels
        __a : Union[str, Any] = is_training
        __a : List[Any] = use_labels
        __a : int = hidden_size
        __a : Optional[Any] = num_hidden_layers
        __a : List[Any] = num_attention_heads
        __a : Dict = intermediate_size
        __a : str = hidden_act
        __a : Dict = hidden_dropout_prob
        __a : str = attention_probs_dropout_prob
        __a : Optional[int] = type_sequence_label_size
        __a : Dict = initializer_range
        __a : Dict = encoder_stride
        __a : int = num_attention_outputs
        __a : List[Any] = embed_dim
        # seq_length is one more than the embedding dim (CLS-style token).
        __a : Optional[Any] = embed_dim + 1
        __a : Optional[Any] = resolution
        __a : Optional[Any] = depths
        __a : Union[str, Any] = hidden_sizes
        __a : List[str] = dim
        __a : Any = mlp_expansion_ratio

    def lowerCAmelCase__(self ):
        """Create a config plus random pixel values (and labels when enabled)."""
        __a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        __a : str = None
        if self.use_labels:
            __a : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        __a : List[str] = self.get_config()
        return config, pixel_values, labels

    def lowerCAmelCase__(self ):
        """Build an EfficientFormerConfig from the stored hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Run the bare model and check the last_hidden_state shape."""
        __a : Optional[Any] = TFEfficientFormerModel(config=_lowercase )
        __a : List[Any] = model(_lowercase , training=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase ):
        """Run the classification head (RGB and greyscale) and check logits shape."""
        __a : Optional[Any] = self.type_sequence_label_size
        __a : Any = TFEfficientFormerForImageClassification(_lowercase )
        __a : Union[str, Any] = model(_lowercase , labels=_lowercase , training=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        __a : Optional[Any] = 1
        __a : int = TFEfficientFormerForImageClassification(_lowercase )

        __a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __a : str = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def lowerCAmelCase__(self ):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        __a : Any = self.prepare_config_and_inputs()
        __a , __a , __a : Tuple = config_and_inputs
        __a : Tuple = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , unittest.TestCase ):
_lowerCAmelCase = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_lowerCAmelCase = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Tuple = TFEfficientFormerModelTester(self )
__a : Any = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowerCAmelCase__(self ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
pass
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(_lowercase )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
__a : Tuple = model_class(_lowercase )
__a : int = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : str = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
__a : Any = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
__a : int = seq_length * self.model_tester.chunk_length
else:
__a : Any = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
__a : Optional[int] = outputs.decoder_hidden_states
self.asseretIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
__a : Any = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
__a , __a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
'''simple docstring'''
__a : Any = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowerCAmelCase__(self ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Union[str, Any] = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : int = True
__a : Optional[int] = getattr(self.model_tester , """seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
__a : Dict = getattr(self.model_tester , """key_length""" , _lowercase )
__a : int = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
__a : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__a : List[Any] = True
__a : Tuple = False
__a : List[Any] = True
__a : int = model_class(_lowercase )
__a : List[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : Dict = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__a : Optional[Any] = True
__a : List[str] = model_class(_lowercase )
__a : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
__a : int = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__a : Dict = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__a : Optional[Any] = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__a : Optional[Any] = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def __magic_name__ ( ):
    """Load the standard COCO cats fixture image used by the slow integration tests.

    (Fixed: original assigned the opened image to ``__a`` but returned the
    unbound name ``image``.)
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # Slow integration tests: run pretrained EfficientFormer-L1 checkpoints on a
    # fixture image and compare logits against recorded reference values.
    # NOTE(review): method bodies look machine-mangled (results assigned to `__a`
    # but later read via `_lowercase`); left byte-identical here.
    @cached_property
    def lowerCAmelCase__(self ):
        """Image processor for the L1-300 checkpoint (None when vision extras are absent)."""
        return (
            EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
            if is_vision_available()
            else None
        )

    @slow
    def lowerCAmelCase__(self ):
        """Check classification logits of the plain image-classification head."""
        __a : str = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
        __a : Optional[Any] = self.default_image_processor
        __a : List[str] = prepare_img()
        __a : int = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : Optional[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : str = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : Dict = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )

    @slow
    def lowerCAmelCase__(self ):
        """Check classification logits of the with-teacher (distillation) head."""
        __a : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            """snap-research/efficientformer-l1-300""" )
        __a : Any = self.default_image_processor
        __a : str = prepare_img()
        __a : str = image_processor(images=_lowercase , return_tensors="""tf""" )
        # forward pass
        __a : List[Any] = model(**_lowercase , training=_lowercase )
        # verify the logits
        __a : int = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        __a : List[str] = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 63 | 1 |
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger. Also bound to `logger`, because the config classes below call
# `logger.warning(...)` / `logger.info(...)` while the mangled name `lowercase__`
# is immediately re-assigned to the archive map (which left `logger` unbound).
logger = lowercase__ = logging.get_logger(__name__)

# Map from canonical checkpoint name to its hosted config URL.
lowercase__ = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration for the OwlViT text encoder (vocab and transformer sizes).

    NOTE(review): the original ``__init__`` declared every parameter as the
    mangled name ``_lowercase`` (a SyntaxError) and assigned values to unread
    temporaries; names/defaults below are restored from the body's reads.
    """

    _lowerCAmelCase = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def lowerCAmelCase__(cls , pretrained_model_name_or_path , **kwargs ):
        """Build this config from a pretrained checkpoint name or path."""
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""text_config"""]

        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(config_dict , **kwargs )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Configuration for the OwlViT vision encoder (ViT patch/transformer sizes).

    NOTE(review): the original ``__init__`` declared every parameter as the
    mangled name ``_lowercase`` (a SyntaxError) and assigned values to unread
    temporaries; names/defaults below are restored from the body's reads.
    """

    _lowerCAmelCase = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def lowerCAmelCase__(cls , pretrained_model_name_or_path , **kwargs ):
        """Build this config from a pretrained checkpoint name or path."""
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("""model_type""" ) == "owlvit":
            config_dict = config_dict["""vision_config"""]

        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(config_dict , **kwargs )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Top-level OwlViT configuration, combining a text and a vision sub-config.

    NOTE(review): both class attributes below were mangled to the same name, so
    the string value is shadowed by ``True``; kept as found to avoid changing
    the (already broken) attribute interface. The two classmethods also share
    one mangled name — the second definition shadows the first.
    """

    _lowerCAmelCase = "owlvit"
    _lowerCAmelCase = True

    def __init__(self , text_config=None , vision_config=None , projection_dim=512 , logit_scale_init_value=2.6592 , return_dict=True , **kwargs , ):
        """Create the combined config; missing sub-configs fall back to defaults."""
        super().__init__(**kwargs )

        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )

        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )

        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def lowerCAmelCase__(cls , pretrained_model_name_or_path , **kwargs ):
        """Build this config from a pretrained checkpoint name or path."""
        cls._set_token_in_kwargs(kwargs )

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )

        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )

        return cls.from_dict(config_dict , **kwargs )

    @classmethod
    def lowerCAmelCase__(cls , text_config , vision_config , **kwargs ):
        """Assemble the combined config from separate text/vision config dicts."""
        config_dict = {}
        config_dict["""text_config"""] = text_config
        config_dict["""vision_config"""] = vision_config
        return cls.from_dict(config_dict , **kwargs )

    def lowerCAmelCase__(self ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__ )
        output["""text_config"""] = self.text_config.to_dict()
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """ONNX export configuration for OwlViT (input/output axes, tolerance, opset).

    NOTE(review): all members below were mangled to the same name
    ``lowerCAmelCase__``, so in Python only the last definition survives; names
    are kept as found to avoid changing the interface. The duplicate-parameter
    SyntaxError in the dummy-input generator is fixed.
    """

    @property
    def lowerCAmelCase__(self ):
        """Dynamic-axis spec for the model inputs."""
        return OrderedDict(
            [
                ("""input_ids""", {0: """batch""", 1: """sequence"""}),
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""attention_mask""", {0: """batch""", 1: """sequence"""}),
            ] )

    @property
    def lowerCAmelCase__(self ):
        """Dynamic-axis spec for the model outputs."""
        return OrderedDict(
            [
                ("""logits_per_image""", {0: """batch"""}),
                ("""logits_per_text""", {0: """batch"""}),
                ("""text_embeds""", {0: """batch"""}),
                ("""image_embeds""", {0: """batch"""}),
            ] )

    @property
    def lowerCAmelCase__(self ):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4

    def lowerCAmelCase__(self , processor , batch_size = -1 , seq_length = -1 , framework = None , ):
        """Generate dummy text and image inputs via the processor's sub-components."""
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}

    @property
    def lowerCAmelCase__(self ):
        """Default ONNX opset version for export."""
        return 14
| 63 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """A TransformedDistribution applying ``y = loc + scale * x`` to a base
    distribution, with closed-form mean/variance/stddev.

    NOTE(review): the original assigned ``loc``/``scale`` to unread temporaries
    (so ``self.loc``/``self.scale`` were never set) and defined all three
    properties under one mangled name while the last read ``self.variance``;
    the property names are restored from those reads.
    """

    def __init__(self , base_distribution , loc=None , scale=None , event_dim=0 ):
        # Default to the identity transform when loc/scale are omitted.
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )

    @property
    def mean(self ):
        """Mean of the affine-transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self ):
        """Variance of the affine-transformed distribution (loc does not contribute)."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self ):
        """Standard deviation (square root of the variance)."""
        return self.variance.sqrt()
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    # Projects a feature vector to one tensor per distribution argument
    # (one nn.Linear per entry of ``args_dim``), then maps the raw outputs
    # through ``domain_map`` to the distributions' valid domains.
    def __init__(self , _lowercase , _lowercase , _lowercase , **_lowercase ):
        '''NOTE(review): duplicated parameter names make this ``def`` a
        SyntaxError; the body reads undefined ``args_dim``/``domain_map``
        and assigns ``__a`` where ``self.proj`` etc. were intended.
        Original signature was presumably
        (in_features, args_dim, domain_map, **kwargs).
        '''
        super().__init__(**_lowercase )
        __a : str = args_dim
        __a : List[Any] = nn.ModuleList([nn.Linear(_lowercase , _lowercase ) for dim in args_dim.values()] )
        __a : Dict = domain_map

    def lowerCAmelCase__(self , _lowercase ):
        '''Apply every projection head to the input and pass the results
        through the domain map (presumably the original ``forward``).'''
        __a : List[Any] = [proj(_lowercase ) for proj in self.proj]
        return self.domain_map(*_lowercase )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
    # Thin nn.Module wrapper that applies an arbitrary callable in forward
    # (a "lambda layer").
    def __init__(self , _lowercase ):
        '''Store the callable to apply.

        NOTE(review): the body reads an undefined ``function`` and assigns
        to a throwaway ``__a`` -- ``self.function = _lowercase`` was
        clearly intended.
        '''
        super().__init__()
        __a : Optional[int] = function

    def lowerCAmelCase__(self , _lowercase , *_lowercase ):
        '''Apply the stored callable (presumably the original ``forward``).

        NOTE(review): ``_lowercase`` is duplicated as both a positional and
        the *args name -- a SyntaxError introduced by the rename.
        '''
        return self.function(_lowercase , *_lowercase )
class SCREAMING_SNAKE_CASE__ :
    # Base class describing a parametric output distribution head for a
    # time-series model: how many raw args it needs, how to project hidden
    # states onto them, and how to build the torch Distribution.
    # NOTE(review): the three class attributes below are all bound to the
    # same name (only the last survives); presumably they were
    # ``distribution_class``, ``in_features`` and ``args_dim``.
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42

    def __init__(self , _lowercase = 1 ):
        '''Record the event dimension and scale ``args_dim`` accordingly.

        NOTE(review): the body reads an undefined ``dim`` (the parameter
        was renamed to ``_lowercase``) and assigns ``__a`` where
        ``self.dim`` / ``self.args_dim`` were intended.
        '''
        __a : Optional[int] = dim
        __a : str = {k: dim * self.args_dim[k] for k in self.args_dim}

    def lowerCAmelCase__(self , _lowercase ):
        '''Build the base distribution from the projected args; wrap in
        Independent(…, 1) when the event dimension is greater than one.'''
        if self.dim == 1:
            return self.distribution_class(*_lowercase )
        else:
            return Independent(self.distribution_class(*_lowercase ) , 1 )

    def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None , ):
        '''Build the (optionally affine-transformed) output distribution.

        NOTE(review): duplicated parameter names make this ``def`` a
        SyntaxError; originally presumably (distr_args, loc=None,
        scale=None).
        '''
        __a : Tuple = self._base_distribution(_lowercase )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(_lowercase , loc=_lowercase , scale=_lowercase , event_dim=self.event_dim )

    @property
    def lowerCAmelCase__(self ):
        '''Event shape: scalar () for dim == 1, else (dim,).'''
        return () if self.dim == 1 else (self.dim,)

    @property
    def lowerCAmelCase__(self ):
        '''Number of event dimensions (length of the event shape).'''
        return len(self.event_shape )

    @property
    def lowerCAmelCase__(self ):
        '''Value nudged away from the distribution's support boundary.'''
        return 0.0

    def lowerCAmelCase__(self , _lowercase ):
        '''Build the projection from hidden features onto the raw
        distribution arguments, domain-mapped via a LambdaLayer.'''
        return ParameterProjection(
            in_features=_lowercase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )

    def lowerCAmelCase__(self , *_lowercase ):
        '''Map raw projection outputs into each parameter's valid domain;
        must be implemented by concrete subclasses.'''
        raise NotImplementedError()

    @staticmethod
    def lowerCAmelCase__(_lowercase ):
        '''Softplus-like positivity map: (x + sqrt(x**2 + 4)) / 2
        (the "squareplus" function).'''
        return (x + torch.sqrt(torch.square(_lowercase ) + 4.0 )) / 2.0
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Student-T output head: three scalar args (df, loc, scale).
    # NOTE(review): both class attributes bind the same name; presumably
    # ``args_dim`` and ``distribution_class``.
    _lowerCAmelCase = {"df": 1, "loc": 1, "scale": 1}
    _lowerCAmelCase = StudentT
    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase , _lowercase ):
        '''Domain map: scale -> positive (squareplus, clamped at eps),
        df -> > 2 (2 + squareplus), then squeeze the trailing arg axis.

        NOTE(review): duplicated parameter names are a SyntaxError and the
        return reads undefined ``df``/``loc``/``scale`` -- the original
        signature was presumably (cls, df, loc, scale).
        '''
        __a : int = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        __a : Optional[Any] = 2.0 + cls.squareplus(_lowercase )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Normal output head: two scalar args (loc, scale).
    # NOTE(review): both class attributes bind the same name; presumably
    # ``args_dim`` and ``distribution_class``.
    _lowerCAmelCase = {"loc": 1, "scale": 1}
    _lowerCAmelCase = Normal
    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase ):
        '''Domain map: scale -> positive via squareplus (clamped at eps),
        then squeeze the trailing arg axis.

        NOTE(review): duplicated parameter names are a SyntaxError and the
        return reads undefined ``loc``/``scale`` -- originally presumably
        (cls, loc, scale).
        '''
        __a : str = cls.squareplus(_lowercase ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    # Negative-binomial output head: (total_count, logits).
    # NOTE(review): both class attributes bind the same name; presumably
    # ``args_dim`` and ``distribution_class``. Several ``def``s below have
    # duplicated parameter names (SyntaxError) and read undefined
    # ``distr_args``/``total_count``/``logits``/``scale`` -- restore the
    # original parameter names before use.
    _lowerCAmelCase = {"total_count": 1, "logits": 1}
    _lowerCAmelCase = NegativeBinomial
    @classmethod
    def lowerCAmelCase__(cls , _lowercase , _lowercase ):
        '''Domain map: total_count -> positive via squareplus; logits are
        unconstrained. Squeeze the trailing arg axis.'''
        __a : Union[str, Any] = cls.squareplus(_lowercase )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def lowerCAmelCase__(self , _lowercase ):
        '''Build the base NegativeBinomial (Independent-wrapped for
        multivariate event dims) from the (total_count, logits) pair.'''
        __a , __a : Optional[Any] = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=_lowercase , logits=_lowercase )
        else:
            return Independent(self.distribution_class(total_count=_lowercase , logits=_lowercase ) , 1 )
    def lowerCAmelCase__(self , _lowercase , _lowercase = None , _lowercase = None ):
        '''Build the output distribution; scaling is folded into the
        logits instead of an affine transform (count support).'''
        __a , __a : List[Any] = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 63 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
# Number of transformer blocks for each published RWKV checkpoint size.
# NOTE(review): both dicts below are bound to the same name
# ``lowercase__`` (rename artifact), so the second overwrites the first;
# the converter references them as ``NUM_HIDDEN_LAYERS_MAPPING`` and
# ``HIDEN_SIZE_MAPPING`` -- restore those names before use.
lowercase__ = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}
# Hidden (embedding) dimension for each published RWKV checkpoint size.
lowercase__ = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
__a : List[Any] = state_dict.pop(_lowerCamelCase )
# emb -> embedding
if name.startswith("""emb.""" ):
__a : str = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
__a : Dict = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
__a : Optional[Any] = re.sub(r"""blocks\.(\d+)\.att""" , r"""blocks.\1.attention""" , _lowerCamelCase )
# ffn -> feed_forward
__a : Tuple = re.sub(r"""blocks\.(\d+)\.ffn""" , r"""blocks.\1.feed_forward""" , _lowerCamelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(""".time_mix_k""" ):
__a : Union[str, Any] = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(""".time_mix_v""" ):
__a : Optional[int] = name.replace(""".time_mix_v""" , """.time_mix_value""" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(""".time_mix_r""" ):
__a : Optional[Any] = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
__a : Dict = """rwkv.""" + name
__a : Union[str, Any] = weight
return state_dict
def __magic_name__ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Any , _lowerCamelCase : List[Any]=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : List[Any]=None ):
    """Download an RWKV checkpoint from the Hub, convert it to the HF
    format (tokenizer + config + sharded weights) and optionally push it.

    NOTE(review): the parameter list repeats ``_lowerCamelCase`` seven
    times, which is a SyntaxError, and the body reads undefined names
    (``tokenizer_file``, ``tokenizer``, ``size``, ``state_dict``,
    ``shards``, ``index``, ...) while assigning to a throwaway ``__a`` --
    the automated rename destroyed the original bindings. The caller at
    the bottom of the file suggests the original signature was
    (repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None,
    push_to_hub=False, model_name=None); restore it before use. It also
    calls ``convert_state_dict``, which is not defined under that name in
    this file.
    """
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
        __a : Optional[int] = 5_0_2_7_7
        __a : Dict = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
    else:
        __a : List[Any] = PreTrainedTokenizerFast(tokenizer_file=_lowerCamelCase )
        __a : List[Any] = len(_lowerCamelCase )
    tokenizer.save_pretrained(_lowerCamelCase )
    # 2. Build the config
    __a : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                __a : str = candidate
                break
        if size is None:
            raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
    if size not in possible_sizes:
        raise ValueError(F'''`size` should be one of {possible_sizes}, got {size}.''' )
    __a : Union[str, Any] = RwkvConfig(
        vocab_size=_lowerCamelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(_lowerCamelCase )
    # 3. Download model file then convert state_dict
    __a : Tuple = hf_hub_download(_lowerCamelCase , _lowerCamelCase )
    __a : int = torch.load(_lowerCamelCase , map_location="""cpu""" )
    __a : Union[str, Any] = convert_state_dict(_lowerCamelCase )
    # 4. Split in shards and save
    __a , __a : str = shard_checkpoint(_lowerCamelCase )
    for shard_file, shard in shards.items():
        torch.save(_lowerCamelCase , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
    if index is not None:
        __a : int = os.path.join(_lowerCamelCase , _lowerCamelCase )
        # Save the index as well
        with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
            __a : List[str] = json.dumps(_lowerCamelCase , indent=2 , sort_keys=_lowerCamelCase ) + """\n"""
            f.write(_lowerCamelCase )
    # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
    print(
        """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" )
    __a : Union[str, Any] = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        __a : List[Any] = torch.load(os.path.join(_lowerCamelCase , _lowerCamelCase ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_lowerCamelCase , _lowerCamelCase ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
        __a : List[str] = AutoModelForCausalLM.from_pretrained(_lowerCamelCase )
        model.push_to_hub(_lowerCamelCase , max_shard_size="""2GB""" )
        tokenizer.push_to_hub(_lowerCamelCase )
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the
    # checkpoint converter defined above.
    lowercase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )
    lowercase__ = parser.parse_args()
    # Fix: the converter in this file is bound to ``__magic_name__`` --
    # ``convert_rmkv_checkpoint_to_hf_format`` was an undefined name.
    # NOTE(review): ``parser`` / ``args`` are also undefined here (the
    # results of the two statements above were renamed to ``lowercase__``);
    # the keyword names below match the converter's intended parameters.
    __magic_name__(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
| 63 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    # Pipeline tests for KandinskyV22PriorPipeline.
    # NOTE(review): all class attributes below bind the same name
    # ``_lowerCAmelCase`` (only the last survives) and all methods bind
    # ``lowerCAmelCase__`` -- automated-rename damage. The bodies also read
    # ``self.time_input_dim`` / ``self.dummy_*`` / ``self.get_dummy_*``,
    # which no longer exist under those names.
    _lowerCAmelCase = KandinskyVaaPriorPipeline
    _lowerCAmelCase = ["prompt"]
    _lowerCAmelCase = ["prompt", "negative_prompt"]
    _lowerCAmelCase = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    _lowerCAmelCase = False

    @property
    def lowerCAmelCase__(self ):
        '''Text-embedder hidden size used by the dummy models.'''
        return 32

    @property
    def lowerCAmelCase__(self ):
        '''Time-embedding input dimension of the dummy prior.'''
        return 32

    @property
    def lowerCAmelCase__(self ):
        '''Block output channels (same as the time input dimension).'''
        return self.time_input_dim

    @property
    def lowerCAmelCase__(self ):
        '''Time-embedding dimension (4x the time input dimension).'''
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase__(self ):
        '''Cross-attention sequence length for the dummy prior.'''
        return 100

    @property
    def lowerCAmelCase__(self ):
        '''Tiny random CLIP tokenizer.'''
        __a : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        return tokenizer

    @property
    def lowerCAmelCase__(self ):
        '''Tiny CLIP text encoder with projection, seeded for determinism.'''
        torch.manual_seed(0 )
        __a : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModelWithProjection(_lowercase )

    @property
    def lowerCAmelCase__(self ):
        '''Tiny PriorTransformer, seeded; clip_std forced to ones so
        post_process_latents does not collapse to zero.'''
        torch.manual_seed(0 )
        __a : Dict = {
            """num_attention_heads""": 2,
            """attention_head_dim""": 12,
            """embedding_dim""": self.text_embedder_hidden_size,
            """num_layers""": 1,
        }
        __a : Tuple = PriorTransformer(**_lowercase )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        __a : int = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def lowerCAmelCase__(self ):
        '''Tiny CLIP vision encoder with projection, seeded.'''
        torch.manual_seed(0 )
        __a : List[str] = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
        __a : Optional[Any] = CLIPVisionModelWithProjection(_lowercase )
        return model

    @property
    def lowerCAmelCase__(self ):
        '''CLIP image processor matching the dummy vision encoder.'''
        __a : Optional[Any] = CLIPImageProcessor(
            crop_size=224 , do_center_crop=_lowercase , do_normalize=_lowercase , do_resize=_lowercase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
        return image_processor

    def lowerCAmelCase__(self ):
        '''Assemble the full set of dummy pipeline components.'''
        __a : Union[str, Any] = self.dummy_prior
        __a : int = self.dummy_image_encoder
        __a : Any = self.dummy_text_encoder
        __a : int = self.dummy_tokenizer
        __a : Optional[Any] = self.dummy_image_processor
        __a : List[Any] = UnCLIPScheduler(
            variance_type="""fixed_small_log""" , prediction_type="""sample""" , num_train_timesteps=1000 , clip_sample=_lowercase , clip_sample_range=10.0 , )
        __a : List[Any] = {
            """prior""": prior,
            """image_encoder""": image_encoder,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """scheduler""": scheduler,
            """image_processor""": image_processor,
        }
        return components

    def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
        '''Build deterministic call kwargs for the pipeline.

        NOTE(review): duplicated parameter names make this ``def`` a
        SyntaxError -- originally presumably (device, seed=0).
        '''
        if str(_lowercase ).startswith("""mps""" ):
            __a : Dict = torch.manual_seed(_lowercase )
        else:
            __a : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __a : Union[str, Any] = {
            """prompt""": """horse""",
            """generator""": generator,
            """guidance_scale""": 4.0,
            """num_inference_steps""": 2,
            """output_type""": """np""",
        }
        return inputs

    def lowerCAmelCase__(self ):
        '''Smoke test: run the prior pipeline on CPU and compare the last
        embedding values against a recorded golden slice.'''
        __a : Union[str, Any] = """cpu"""
        __a : Union[str, Any] = self.get_dummy_components()
        __a : Dict = self.pipeline_class(**_lowercase )
        __a : Tuple = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __a : Optional[int] = pipe(**self.get_dummy_inputs(_lowercase ) )
        __a : str = output.image_embeds
        __a : Any = pipe(
            **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
        __a : List[Any] = image[0, -10:]
        __a : List[Any] = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        __a : Optional[Any] = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def lowerCAmelCase__(self ):
        '''Batched single-input consistency check (skipped on MPS).'''
        __a : Any = torch_device == """cpu"""
        __a : Any = True
        __a : Any = False
        self._test_inference_batch_single_identical(
            test_max_difference=_lowercase , relax_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )

    @skip_mps
    def lowerCAmelCase__(self ):
        '''Attention-slicing forward pass check (skipped on MPS).'''
        __a : Optional[int] = torch_device == """cpu"""
        __a : Union[str, Any] = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=_lowercase , test_mean_pixel_difference=_lowercase , )
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list ):
    """Sort *_lowerCamelCase* in place using circle sort and return it.

    Circle sort compares/swaps elements that mirror each other across the
    middle of the current range, recurses into both halves, and repeats
    whole passes until one completes with no swap.

    Fixes: the previous body's inner helper duplicated its parameter names
    (a SyntaxError) and referenced undefined locals (``collection``,
    ``low``, ``high``, ``swapped``, ...) left behind by an automated
    rename, so the function could not even be compiled.
    """
    collection = _lowerCamelCase
    if len(collection ) < 2:
        return collection

    def circle_sort_util(collection: list , low: int , high: int ) -> bool:
        """One recursive pass over collection[low:high+1]; True if any swap."""
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # Odd-length range: the loop skips the centre pair, compare it here.
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap

    # Keep making full passes until one completes without swapping.
    while circle_sort_util(collection , 0 , len(collection ) - 1 ):
        pass
    return collection


# Alias under the algorithm's intended name; the __main__ block below
# refers to this function as ``circle_sort``.
circle_sort = __magic_name__
if __name__ == "__main__":
    # Read a comma-separated list of integers from stdin, sort it with the
    # circle-sort implementation above, and print the result.
    # Fixes: the previous version referenced the undefined names
    # ``user_input``, ``unsorted`` and ``circle_sort`` (rename damage).
    lowercase__ = input("Enter numbers separated by a comma:\n").strip()
    lowercase__ = [int(item) for item in lowercase__.split(",")]
    print(__magic_name__(lowercase__))
| 63 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    # Tokenizer tests for LED (slow and fast tokenizers).
    # NOTE(review): all test methods below are bound to the same name
    # ``lowerCAmelCase__`` -- under unittest only methods named ``test_*``
    # are collected anyway, and only the last binding survives on the
    # class. Several bodies also read names that the automated rename
    # destroyed (``_lowercase`` inside no-arg methods, ``self.vocab_file``
    # never assigned, ...); restore the original local names before use.
    _lowerCAmelCase = LEDTokenizer
    _lowerCAmelCase = LEDTokenizerFast
    _lowerCAmelCase = True

    def lowerCAmelCase__(self ):
        '''Write a tiny BPE vocab and merges file into the temp dir
        (presumably the original ``setUp``).'''
        super().setUp()
        __a : str = [
            """l""",
            """o""",
            """w""",
            """e""",
            """r""",
            """s""",
            """t""",
            """i""",
            """d""",
            """n""",
            """\u0120""",
            """\u0120l""",
            """\u0120n""",
            """\u0120lo""",
            """\u0120low""",
            """er""",
            """\u0120lowest""",
            """\u0120newer""",
            """\u0120wider""",
            """<unk>""",
        ]
        __a : int = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        __a : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        __a : List[Any] = {"""unk_token""": """<unk>"""}
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(_lowercase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(_lowercase ) )

    def lowerCAmelCase__(self , **_lowercase ):
        '''Instantiate the slow tokenizer from the temp dir.'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )

    def lowerCAmelCase__(self , **_lowercase ):
        '''Instantiate the fast (Rust) tokenizer from the temp dir.'''
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )

    def lowerCAmelCase__(self , _lowercase ):
        '''Provide a fixed input/output text pair for common tests.'''
        return "lower newer", "lower newer"

    @cached_property
    def lowerCAmelCase__(self ):
        '''Pretrained slow LED tokenizer (downloaded once, cached).'''
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )

    @cached_property
    def lowerCAmelCase__(self ):
        '''Pretrained fast LED tokenizer (downloaded once, cached).'''
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Batched encoding returns expected ids and (2, 9) shapes.'''
        __a : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        __a : List[str] = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(_lowercase , max_length=len(_lowercase ) , padding=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            __a : Dict = batch.input_ids.tolist()[0]
            self.assertListEqual(_lowercase , _lowercase )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Encoding without targets yields no labels/decoder masks.'''
        __a : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors="""pt""" )
            self.assertIn("""input_ids""" , _lowercase )
            self.assertIn("""attention_mask""" , _lowercase )
            self.assertNotIn("""labels""" , _lowercase )
            self.assertNotIn("""decoder_attention_mask""" , _lowercase )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Target texts padded to max_length have width 32.'''
        __a : Optional[Any] = [
            """Summary of the text.""",
            """Another summary.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Dict = tokenizer(text_target=_lowercase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
            self.assertEqual(32 , targets["""input_ids"""].shape[1] )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Very long input is truncated to the model max (5122).'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[int] = tokenizer(
                ["""I am a small frog""" * 1024, """I am a small frog"""] , padding=_lowercase , truncation=_lowercase , return_tensors="""pt""" )
            self.assertIsInstance(_lowercase , _lowercase )
            self.assertEqual(batch.input_ids.shape , (2, 5122) )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Inputs and targets both get BOS/EOS special tokens.'''
        __a : Tuple = ["""A long paragraph for summarization."""]
        __a : Dict = [
            """Summary of the text.""",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : int = tokenizer(_lowercase , return_tensors="""pt""" )
            __a : Dict = tokenizer(text_target=_lowercase , return_tensors="""pt""" )
            __a : List[str] = inputs["""input_ids"""]
            __a : List[Any] = targets["""input_ids"""]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def lowerCAmelCase__(self ):
        '''Padding propagates the global attention mask correctly.'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            __a : Optional[Any] = ["""Summary of the text.""", """Another summary."""]
            __a : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            __a : Union[str, Any] = tokenizer(_lowercase , padding=_lowercase )
            __a : Tuple = [[0] * len(_lowercase ) for x in encoded_output["""input_ids"""]]
            __a : Union[str, Any] = tokenizer.pad(_lowercase )
            self.assertSequenceEqual(outputs["""global_attention_mask"""] , _lowercase )

    def lowerCAmelCase__(self ):
        '''Intentionally skipped common test.'''
        pass

    def lowerCAmelCase__(self ):
        '''Slow and fast tokenizers agree on special-token handling.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                __a : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
                __a : Union[str, Any] = """A, <mask> AllenNLP sentence."""
                __a : Dict = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                __a : Tuple = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
                self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                self.assertEqual(
                    sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
                __a : Tuple = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
                __a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
                self.assertSequenceEqual(
                    _lowercase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 63 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
# Module-level RNG shared by the dummy-feature helpers below.
# NOTE(review): the helper references it as ``global_rng``; the automated
# rename to ``lowercase__`` broke that link -- restore the original name.
lowercase__ = random.Random()
def __magic_name__ ( _lowerCamelCase : Dict , _lowerCamelCase : List[str]=1.0 , _lowerCamelCase : List[Any]=None , _lowerCamelCase : List[Any]=None ):
    """Create a 2-D nested list of random floats in [0, scale).

    NOTE(review): the parameter list repeats ``_lowerCamelCase`` four
    times, a SyntaxError; the body also reads undefined ``rng``,
    ``shape``, ``scale`` and ``global_rng``. The original signature was
    presumably (shape, scale=1.0, rng=None, name=None).
    """
    if rng is None:
        __a : Dict = global_rng
    __a : Any = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    # Helper that stores Wav2Vec2 feature-extractor test hyperparameters
    # and builds dummy speech inputs.
    def __init__(self , _lowercase , _lowercase=7 , _lowercase=400 , _lowercase=2000 , _lowercase=1 , _lowercase=0.0 , _lowercase=16000 , _lowercase=True , _lowercase=True , ):
        '''Record the test configuration.

        NOTE(review): duplicated parameter names make this ``def`` a
        SyntaxError, and the body reads the undefined original names
        (``parent``, ``batch_size``, ``min_seq_length``, ...). Original
        signature was presumably (parent, batch_size=7,
        min_seq_length=400, max_seq_length=2000, feature_size=1,
        padding_value=0.0, sampling_rate=16000,
        return_attention_mask=True, do_normalize=True).
        '''
        __a : Optional[Any] = parent
        __a : str = batch_size
        __a : Optional[Any] = min_seq_length
        __a : str = max_seq_length
        __a : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __a : List[Any] = feature_size
        __a : Optional[int] = padding_value
        __a : Dict = sampling_rate
        __a : Any = return_attention_mask
        __a : Optional[int] = do_normalize

    def lowerCAmelCase__(self ):
        '''Return the kwargs for constructing the feature extractor.'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def lowerCAmelCase__(self , _lowercase=False , _lowercase=False ):
        '''Build dummy speech inputs of equal or increasing length.

        NOTE(review): duplicated parameter names -- originally presumably
        (equal_length=False, numpify=False).
        '''
        def _flatten(_lowercase ):
            return list(itertools.chain(*_lowercase ) )
        if equal_length:
            __a : List[str] = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            __a : str = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            __a : Dict = [np.asarray(_lowercase ) for x in speech_inputs]
        return speech_inputs
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
_lowerCAmelCase = WavaVecaFeatureExtractor
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[int] = WavaVecaFeatureExtractionTester(self )
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(_lowercase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(_lowercase , axis=0 ) - 1 ) < 1e-3 ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__a : str = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : str = [np.asarray(_lowercase ) for speech_input in speech_inputs]
# Test not batched input
__a : int = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
__a : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# Test batched
__a : Optional[Any] = feat_extract(_lowercase , return_tensors="""np""" ).input_values
__a : Optional[Any] = feat_extract(_lowercase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
__a : Optional[int] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__a : Dict = np.asarray(_lowercase )
__a : List[str] = feat_extract(_lowercase , return_tensors="""np""" ).input_values
__a : Union[str, Any] = feat_extract(_lowercase , return_tensors="""np""" ).input_values
for enc_seq_a, enc_seq_a in zip(_lowercase , _lowercase ):
self.assertTrue(np.allclose(_lowercase , _lowercase , atol=1e-3 ) )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Optional[int] = ["""longest""", """max_length""", """do_not_pad"""]
__a : int = [None, 1600, None]
for max_length, padding in zip(_lowercase , _lowercase ):
__a : Optional[int] = feat_extract(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors="""np""" )
__a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Optional[int] = range(800 , 1400 , 200 )
__a : Optional[Any] = [floats_list((1, x) )[0] for x in lengths]
__a : Any = ["""longest""", """max_length""", """do_not_pad"""]
__a : Any = [None, 1600, None]
for max_length, padding in zip(_lowercase , _lowercase ):
__a : Dict = feat_extract(_lowercase , max_length=_lowercase , padding=_lowercase )
__a : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Dict = feat_extract(
_lowercase , truncation=_lowercase , max_length=1000 , padding="""max_length""" , return_tensors="""np""" )
__a : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = feat_extract(
_lowercase , truncation=_lowercase , max_length=1000 , padding="""longest""" , return_tensors="""np""" )
__a : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
__a : List[str] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__a : Union[str, Any] = feat_extract(
_lowercase , truncation=_lowercase , max_length=2000 , padding="""longest""" , return_tensors="""np""" )
__a : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
import torch
__a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__a : List[str] = np.random.rand(100 ).astype(np.floataa )
__a : Tuple = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__a : List[Any] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__a : Dict = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    @slow
    @require_torch
    def lowerCAmelCase__(self ):
        """For every pretrained checkpoint, `return_attention_mask` must mirror
        whether the config uses "layer" feature-extraction norm.

        NOTE(review): mangled — `_lowercase` is undefined here; both
        `from_pretrained` calls presumably take `model_id` from the loop, and
        `config` / `feat_extract` are the two `__a` assignments above them.
        Confirm against the upstream Wav2Vec2 feature-extraction tests.
        """
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            # config (mangled to __a)
            __a : Dict = WavaVecaConfig.from_pretrained(_lowercase )
            # feat_extract (mangled to __a)
            __a : Any = WavaVecaFeatureExtractor.from_pretrained(_lowercase )
            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
| 63 |
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
parser.add_argument(
"--original_config_file",
type=str,
required=True,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--image_size",
default=512,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F'''could not parse string as bool {string}''' )
parser.add_argument(
"--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
)
parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
lowercase__ = parser.parse_args()
lowercase__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 63 | 1 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __magic_name__(number: int) -> int:
    """Return the Möbius function mu(number).

    mu(n) = 0 if n has a squared prime factor, otherwise (-1)**k where k is
    the number of prime factors. Fixes the mangled body, which passed the raw
    argument where the factor list was needed (`len(int)` -> TypeError).
    """
    factors = prime_factors(number)
    if is_square_free(factors):
        # Square-free: the parity of the factor count decides the sign.
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 63 |
"""simple docstring"""
import torch
from diffusers import DiffusionPipeline
class SCREAMING_SNAKE_CASE__ ( DiffusionPipeline ):
    """Minimal custom pipeline: runs one UNet + scheduler step and returns an
    all-ones tensor shaped like the scheduler output.

    Fixes the mangled class: duplicate `_lowercase` parameters (SyntaxError),
    undefined locals in `__call__`, and the undefined base `__snake_case`
    (the file imports `DiffusionPipeline`, which is the intended base).
    """

    def __init__(self, unet, scheduler):
        """Store the two sub-modules; names are grounded by the
        `register_modules` keywords below."""
        super().__init__()
        # Registering lets save_pretrained/from_pretrained handle both modules.
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        """Run a single denoising step on random noise and return ones."""
        sample = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(sample, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, sample).prev_sample
        # scheduler_output - scheduler_output is exactly zero, so the pipeline
        # deliberately returns a ones tensor with the output's shape/dtype.
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( _lowerCamelCase : list[int] , _lowerCamelCase : int ):
__a : Dict = 0
__a : int = len(_lowerCamelCase ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
__a : Optional[int] = i + 1
else:
__a : Optional[int] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{two_pointer([2, 7, 11, 15], 9) = }')
| 63 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
    """Configuration for a ViT-MSN-style vision transformer.

    Fixes the mangled class: duplicate `_lowercase` parameters (SyntaxError),
    constructor arguments never stored on `self`, and the undefined base
    `__snake_case` (the file imports `PretrainedConfig`, the intended base).
    Parameter names/order are grounded by the original attribute assignments.
    """

    # Model identifier used by the auto classes; "vit_msn" per the checkpoint
    # map at the top of this module (was mangled to `_lowerCAmelCase`).
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        """Store the transformer/vision hyper-parameters on the config."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 63 | 1 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE__ ( SeqaSeqTrainer ):
    """Sequence-to-sequence trainer specialized for question answering.

    Adds example-level post-processing: raw generation outputs are converted to
    answer texts (via `post_process_function`) before metrics are computed.

    Fixes the mangled class: duplicate `_lowercase` parameters (SyntaxError),
    both public methods sharing one name (the second shadowed the first), and
    the undefined base `__snake_case` (the file imports `SeqaSeqTrainer`).
    Parameter names are grounded by the bodies, which read `eval_dataset`,
    `eval_examples`, `ignore_keys`, `metric_key_prefix` and `gen_kwargs`.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        # Raw (un-tokenized) examples aligned with the eval dataset; needed to
        # map predictions back to answer strings.
        self.eval_examples = eval_examples
        # Callable(examples, dataset, predictions[, stage]) -> metric inputs.
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval", **gen_kwargs):
        """Run generation-based evaluation, post-process, and compute metrics."""
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test", **gen_kwargs):
        """Run generation-based prediction; returns a `PredictionOutput` with
        post-processed predictions and prefixed metrics."""
        self._gen_kwargs = gen_kwargs.copy()
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 63 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
lowercase__ = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
lowercase__ = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
lowercase__ = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Fast DPR context-encoder tokenizer shell.

    NOTE(review): the base `__snake_case` is undefined in this module; given the
    imports above it is presumably `BertTokenizerFast` — confirm. All five class
    attributes were mangled to the single name `_lowerCAmelCase` (only the last
    assignment survives) and the right-hand constants are undefined here;
    originally these are the standard `vocab_files_names`,
    `pretrained_vocab_files_map`, `max_model_input_sizes`,
    `pretrained_init_configuration` and `slow_tokenizer_class` attributes.
    """

    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = DPRContextEncoderTokenizer
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Fast DPR question-encoder tokenizer shell.

    NOTE(review): same mangling as the context-encoder class above — undefined
    base (presumably `BertTokenizerFast`), all attributes collapsed to
    `_lowerCAmelCase`, and undefined right-hand constants. Confirm upstream.
    """

    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = DPRQuestionEncoderTokenizer
lowercase__ = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
lowercase__ = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
lowercase__ = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(lowercase__)
class SCREAMING_SNAKE_CASE__ :
    """Mixin adding DPR-reader-specific encoding and best-span decoding on top
    of a fast BERT tokenizer.

    Fixes the mangled class: duplicate `_lowercase` parameters (SyntaxError),
    a sort lambda whose body read `x` while its parameter was `_lowercase`
    (NameError), a call to the missing attribute `_get_best_spans`, and the
    decorator argument `__snake_case` (undefined; the call docstring is the
    last `lowercase__` assignment above). Parameter names are grounded by the
    bodies and the docstring constant.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ):
        # No passages at all: behave exactly like the underlying tokenizer.
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        # Only one of titles/texts given: encode it as the text pair.
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question may be paired with many passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                # Attend to every non-padding token.
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def lowerCAmelCase__(self, reader_input, reader_output, num_spans=16, max_answer_length=64, num_spans_per_passage=4):
        """Decode reader outputs into the best answer spans.

        Passages are visited by decreasing relevance score; within each passage
        up to `num_spans_per_passage` non-overlapping spans are kept, stopping
        once `num_spans` predictions have been collected overall.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                # Span indices are relative to the passage slice; shift back.
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(self, start_logits, end_logits, max_answer_length, top_spans):
        """Score every candidate (start, end) span and keep the `top_spans`
        highest-scoring, mutually non-overlapping intervals (indices relative
        to the passage slice passed in).
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda span_and_score: span_and_score[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip spans overlapping an already-chosen one.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(__snake_case )
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case ):
    """Fast DPR reader tokenizer shell.

    NOTE(review): mangled — the decorator argument and both bases are the
    undefined `__snake_case`; upstream this class mixes the custom reader
    mixin defined above into `BertTokenizerFast` — confirm. Attributes were
    all collapsed to `_lowerCAmelCase` (only the last assignment survives) and
    the right-hand constants are undefined in this module.
    """

    _lowerCAmelCase = VOCAB_FILES_NAMES
    _lowerCAmelCase = READER_PRETRAINED_VOCAB_FILES_MAP
    _lowerCAmelCase = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _lowerCAmelCase = READER_PRETRAINED_INIT_CONFIGURATION
    _lowerCAmelCase = ["input_ids", "attention_mask"]
    _lowerCAmelCase = DPRReaderTokenizer
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
lowercase__ = [True] * 1000001
lowercase__ = 2
while i * i <= 1000000:
if seive[i]:
for j in range(i * i, 1000001, i):
lowercase__ = False
i += 1
def __magic_name__ ( _lowerCamelCase : int ):
return seive[n]
def __magic_name__ ( _lowerCamelCase : int ):
return any(digit in """02468""" for digit in str(_lowerCamelCase ) )
def __magic_name__ ( _lowerCamelCase : int = 1_0_0_0_0_0_0 ):
__a : str = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(_lowerCamelCase ) and not contains_an_even_digit(_lowerCamelCase ):
__a : str = str(_lowerCamelCase )
__a : str = [int(str_num[j:] + str_num[:j] ) for j in range(len(_lowerCamelCase ) )]
if all(is_prime(_lowerCamelCase ) for i in list_nums ):
result.append(_lowerCamelCase )
return result
def __magic_name__ ( ):
return len(find_circular_primes() )
if __name__ == "__main__":
print(f'{len(find_circular_primes()) = }')
| 63 |
"""simple docstring"""
import os
def __magic_name__ ( _lowerCamelCase : Dict ):
__a : List[str] = len(grid[0] )
__a : int = len(_lowerCamelCase )
__a : Tuple = 0
__a : List[Any] = 0
__a : List[str] = 0
# Check vertically, horizontally, diagonally at the same time (only works
# for nxn grid)
for i in range(_lowerCamelCase ):
for j in range(n_rows - 3 ):
__a : List[Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
__a : Tuple = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
# Left-to-right diagonal (\) product
if i < n_columns - 3:
__a : List[Any] = (
grid[i][j]
* grid[i + 1][j + 1]
* grid[i + 2][j + 2]
* grid[i + 3][j + 3]
)
# Right-to-left diagonal(/) product
if i > 2:
__a : List[Any] = (
grid[i][j]
* grid[i - 1][j + 1]
* grid[i - 2][j + 2]
* grid[i - 3][j + 3]
)
__a : str = max(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if max_product > largest:
__a : Optional[Any] = max_product
return largest
def __magic_name__ ( ):
__a : Tuple = []
with open(os.path.dirname(_lowerCamelCase ) + """/grid.txt""" ) as file:
for line in file:
grid.append(line.strip("""\n""" ).split(""" """ ) )
__a : Tuple = [[int(_lowerCamelCase ) for i in grid[j]] for j in range(len(_lowerCamelCase ) )]
return largest_product(_lowerCamelCase )
if __name__ == "__main__":
print(solution())
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : int ):
__a : list[list[str]] = [[] for _ in range(_lowerCamelCase )]
__a : Any = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(_lowerCamelCase ) <= key:
return input_string
for position, character in enumerate(_lowerCamelCase ):
__a : str = position % (lowest * 2) # puts it in bounds
__a : Optional[Any] = min(_lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(_lowerCamelCase )
__a : Dict = ["""""".join(_lowerCamelCase ) for row in temp_grid]
__a : Union[str, Any] = """""".join(_lowerCamelCase )
return output_string
def __magic_name__ ( _lowerCamelCase : str , _lowerCamelCase : int ):
__a : Tuple = []
__a : List[Any] = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
__a : list[list[str]] = [[] for _ in range(_lowerCamelCase )] # generates template
for position in range(len(_lowerCamelCase ) ):
__a : List[Any] = position % (lowest * 2) # puts it in bounds
__a : Tuple = min(_lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
__a : Tuple = 0
for row in temp_grid: # fills in the characters
__a : Union[str, Any] = input_string[counter : counter + len(_lowerCamelCase )]
grid.append(list(_lowerCamelCase ) )
counter += len(_lowerCamelCase )
__a : Dict = """""" # reads as zigzag
for position in range(len(_lowerCamelCase ) ):
__a : List[str] = position % (lowest * 2) # puts it in bounds
__a : Optional[Any] = min(_lowerCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def __magic_name__ ( _lowerCamelCase : str ):
__a : Tuple = {}
for key_guess in range(1 , len(_lowerCamelCase ) ): # tries every key
__a : Tuple = decrypt(_lowerCamelCase , _lowerCamelCase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Output shell for the semantic Stable Diffusion pipeline.

    NOTE(review): mangled — the base `__snake_case` is undefined (presumably
    `BaseOutput`, which this file imports), and both fields were collapsed to
    the single name `_lowerCAmelCase` with their type annotations replaced by
    `42`; upstream these are the `images` and `nsfw_content_detected` fields —
    confirm before relying on this class.
    """

    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 63 | 1 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( _lowerCamelCase : str ):
return [ord(_lowerCamelCase ) - 9_6 for elem in plain]
def __magic_name__ ( _lowerCamelCase : list[int] ):
return "".join(chr(elem + 9_6 ) for elem in encoded )
def __magic_name__ ( ):
__a : List[str] = encode(input("""-> """ ).strip().lower() )
print("""Encoded: """ , _lowerCamelCase )
print("""Decoded:""" , decode(_lowerCamelCase ) )
if __name__ == "__main__":
main()
| 63 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
lowercase__ = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
_lowerCAmelCase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCAmelCase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
_lowerCAmelCase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
_lowerCAmelCase = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase__(self ):
'''simple docstring'''
__a : int = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
__a : Tuple = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Optional[Any] = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}] )
__a : int = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : List[str] = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
# Legacy behavior
__a : Optional[int] = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(nested_simplify(_lowercase ) , [{"""label""": """LABEL_0""", """score""": 0.504}] )
__a : Tuple = text_classifier("""This is great !""" , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}]] )
__a : Any = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
[{"""label""": """LABEL_0""", """score""": 0.504}, {"""label""": """LABEL_1""", """score""": 0.496}],
] , )
__a : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=_lowercase )
self.assertEqual(
nested_simplify(_lowercase ) , [
{"""label""": """LABEL_0""", """score""": 0.504},
{"""label""": """LABEL_0""", """score""": 0.504},
] , )
@require_torch
def lowerCAmelCase__(self ):
    """The pipeline accepts an explicit ``torch.device`` and still classifies correctly."""
    import torch

    # Fix: the original assigned the pipeline and its output to a throwaway
    # `__a` while the assertion referenced the undefined names
    # `text_classifier` / `_lowercase` (NameError at runtime).
    text_classifier = pipeline(
        task="text-classification",
        model="hf-internal-testing/tiny-random-distilbert",
        framework="pt",
        device=torch.device("cpu"),
    )
    outputs = text_classifier("This is great !")
    self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@require_tf
def lowerCAmelCase__(self ):
    """Smoke test: the TensorFlow backend of the tiny text-classification pipeline works."""
    # Fix: restore consistent local names — the original assigned to `__a`
    # but asserted on the undefined `_lowercase` (NameError at runtime).
    text_classifier = pipeline(
        task="text-classification",
        model="hf-internal-testing/tiny-random-distilbert",
        framework="tf",
    )
    outputs = text_classifier("This is great !")
    self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
def lowerCAmelCase__(self ):
    """The default (PyTorch) sentiment pipeline returns the expected labels/scores."""
    # Fix: restore consistent local names — the original assigned to `__a`
    # but asserted on the undefined `_lowercase` (NameError at runtime).
    text_classifier = pipeline("text-classification")

    outputs = text_classifier("This is great !")
    self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
    outputs = text_classifier("This is bad !")
    self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
    outputs = text_classifier("Birds are a type of animal")
    self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
@slow
@require_tf
def lowerCAmelCase__(self ):
    """The default TensorFlow sentiment pipeline returns the expected labels/scores."""
    # Fix: restore consistent local names — the original assigned to `__a`
    # but asserted on the undefined `_lowercase` (NameError at runtime).
    text_classifier = pipeline("text-classification", framework="tf")

    outputs = text_classifier("This is great !")
    self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
    outputs = text_classifier("This is bad !")
    self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
    outputs = text_classifier("Birds are a type of animal")
    self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
def lowerCAmelCase__(self , model , tokenizer , processor ):
    """Build a pipeline plus sample inputs for the shared pipeline-test harness.

    Fix: the original signature declared three parameters all named
    ``_lowercase`` — duplicate argument names are a SyntaxError in Python —
    and the body returned the undefined name ``text_classifier``.
    """
    text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
    # Small inputs because the tiny test tokenizers have short max positions.
    return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase__(self , text_classifier , _ ):
    """Exercise a freshly built text-classification pipeline on single strings,
    batches, ``top_k=None`` and text pairs.

    Fix: the original signature had two parameters both named ``_lowercase``
    (a SyntaxError) and the body referenced ``_lowercase`` where the previous
    result variable was meant; ``idalabel`` is restored to the real config
    attribute ``id2label``.
    """
    model = text_classifier.model
    # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
    valid_inputs = "HuggingFace is in"
    outputs = text_classifier(valid_inputs)
    self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
    self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

    valid_inputs = ["HuggingFace is in ", "Paris is in France"]
    outputs = text_classifier(valid_inputs)
    self.assertEqual(
        nested_simplify(outputs),
        [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
    )
    self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
    self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

    # Forcing to get all results with `top_k=None`
    # This is NOT the legacy format
    outputs = text_classifier(valid_inputs, top_k=None)
    N = len(model.config.id2label.values())
    self.assertEqual(
        nested_simplify(outputs),
        [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
    )

    valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
    outputs = text_classifier(valid_inputs)
    self.assertEqual(
        nested_simplify(outputs),
        {"label": ANY(str), "score": ANY(float)},
    )
    self.assertTrue(outputs["label"] in model.config.id2label.values())

    # This might be used a text pair, but tokenizer + pipe interaction
    # makes it hard to understand that it's not using the pair properly
    # https://github.com/huggingface/transformers/issues/17305
    # We disabled this usage instead as it was outputting wrong outputs.
    invalid_input = [["HuggingFace is in ", "Paris is in France"]]
    with self.assertRaises(ValueError):
        text_classifier(invalid_input)

    # This used to be valid for doing text pairs
    # We're keeping it working because of backward compatibility
    outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
    self.assertEqual(
        nested_simplify(outputs),
        [{"label": ANY(str), "score": ANY(float)}],
    )
    self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 63 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
    """Model-tester helper: builds a tiny Nezha config plus dummy inputs and
    runs shape checks for every Nezha head (MLM, NSP, QA, classification, ...).

    NOTE(review): this class is corrupted by a mechanical rename — every
    ``__init__`` parameter and most method parameters are named ``_lowercase``
    (duplicate argument names are a SyntaxError), and all local assignments go
    to a throwaway ``__a`` while later lines still read the original names
    (``parent``, ``batch_size``, ``config``, ``model``, ``result``, ...).
    The original names need restoring before this can run; the comments below
    describe the evident intent.
    """

    def __init__(self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=128 , _lowercase=32 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=3 , _lowercase=4 , _lowercase=None , ):
        """Store the tester hyperparameters (tiny model dimensions) on the instance."""
        # NOTE(review): each assignment below should target the attribute named
        # on its right-hand side (e.g. `self.parent = parent`) — TODO confirm.
        __a : List[str] = parent
        __a : int = batch_size
        __a : List[str] = seq_length
        __a : List[Any] = is_training
        __a : Optional[int] = use_input_mask
        __a : int = use_token_type_ids
        __a : List[Any] = use_labels
        __a : List[str] = vocab_size
        __a : Optional[Any] = hidden_size
        __a : Optional[int] = num_hidden_layers
        __a : Tuple = num_attention_heads
        __a : Union[str, Any] = intermediate_size
        __a : List[str] = hidden_act
        __a : Any = hidden_dropout_prob
        __a : List[str] = attention_probs_dropout_prob
        __a : Dict = max_position_embeddings
        __a : int = type_vocab_size
        __a : Tuple = type_sequence_label_size
        __a : Any = initializer_range
        __a : List[str] = num_labels
        __a : Optional[int] = num_choices
        __a : List[str] = scope

    def lowerCAmelCase__(self ):
        """Build random input_ids / masks / labels tensors plus a config."""
        __a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __a : int = None
        if self.use_input_mask:
            __a : str = random_attention_mask([self.batch_size, self.seq_length] )
        __a : Union[str, Any] = None
        if self.use_token_type_ids:
            __a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __a : List[Any] = None
        __a : Optional[Any] = None
        __a : Tuple = None
        if self.use_labels:
            __a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __a : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __a : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
        __a : Dict = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase__(self ):
        """Return a tiny NezhaConfig built from the stored hyperparameters."""
        # NOTE(review): `_lowercase` below is not defined in this scope —
        # presumably `is_decoder=False`; confirm against history.
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )

    def lowerCAmelCase__(self ):
        """Extend the basic inputs with encoder hidden states / attention mask (decoder case)."""
        # NOTE(review): this tuple target unpacks seven values into the same
        # name `__a` — originally seven distinct variables (config, input_ids, ...).
        (
            (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) ,
        ) : Dict = self.prepare_config_and_inputs()
        __a : Any = True
        __a : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        __a : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Forward the bare NezhaModel (with/without masks) and check output shapes."""
        __a : List[Any] = NezhaModel(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        __a : Any = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
        __a : str = model(_lowercase , token_type_ids=_lowercase )
        __a : int = model(_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ):
        """Forward NezhaModel as a decoder with cross-attention inputs and check shapes."""
        __a : Dict = True
        __a : Tuple = NezhaModel(_lowercase )
        model.to(_lowercase )
        model.eval()
        __a : List[Any] = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , )
        __a : Optional[Any] = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , encoder_hidden_states=_lowercase , )
        __a : List[str] = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Check the masked-LM head produces (batch, seq, vocab) logits."""
        __a : int = NezhaForMaskedLM(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        __a : Any = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Check the next-sentence-prediction head produces (batch, 2) logits."""
        __a : Any = NezhaForNextSentencePrediction(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        __a : Optional[Any] = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Check the pretraining head: MLM logits plus seq-relationship logits."""
        __a : Any = NezhaForPreTraining(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        __a : Any = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , next_sentence_label=_lowercase , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Check the QA head produces per-token start/end logits."""
        __a : Any = NezhaForQuestionAnswering(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        __a : List[Any] = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , start_positions=_lowercase , end_positions=_lowercase , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Check the sequence-classification head produces (batch, num_labels) logits."""
        __a : Any = self.num_labels
        __a : str = NezhaForSequenceClassification(_lowercase )
        model.to(_lowercase )
        model.eval()
        __a : int = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Check the token-classification head produces per-token logits."""
        __a : int = self.num_labels
        __a : Tuple = NezhaForTokenClassification(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        __a : int = model(_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
        """Check the multiple-choice head: inputs tiled per choice, (batch, num_choices) logits."""
        __a : Union[str, Any] = self.num_choices
        __a : str = NezhaForMultipleChoice(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        # Tile each input along a new "choices" dimension.
        __a : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __a : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __a : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __a : List[str] = model(
            _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , labels=_lowercase , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def lowerCAmelCase__(self ):
        """Repackage config and inputs as the (config, inputs_dict) pair the common tests expect."""
        __a : Dict = self.prepare_config_and_inputs()
        # NOTE(review): same collapsed seven-way unpack as above.
        (
            (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) ,
        ) : Optional[int] = config_and_inputs
        __a : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    """Common-suite tests (ModelTesterMixin / GenerationTesterMixin / PipelineTesterMixin)
    for the Nezha model family.

    NOTE(review): the three class-level assignments below all bind the same
    name ``_lowerCAmelCase`` — each shadows the previous, so only the final
    ``True`` survives.  Originally these were distinct attributes
    (``all_model_classes``, ``pipeline_model_mapping``, ``fx_compatible`` or
    similar) — TODO restore.  Method bodies also reference the undefined
    ``_lowercase`` where their own locals were meant (rename artifact).
    """

    _lowerCAmelCase = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _lowerCAmelCase = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _lowerCAmelCase = True

    def lowerCAmelCase__(self , _lowercase , _lowercase , _lowercase=False ):
        """Add dummy labels to the inputs dict when `return_labels` is requested."""
        __a : Dict = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
        if return_labels:
            if model_class in get_values(_lowercase ):
                __a : Any = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowercase )
                __a : Union[str, Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_lowercase )
        return inputs_dict

    def lowerCAmelCase__(self ):
        """Wire up the model tester and the config tester."""
        __a : Any = NezhaModelTester(self )
        __a : str = ConfigTester(self , config_class=_lowercase , hidden_size=37 )

    def lowerCAmelCase__(self ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase__(self ):
        """Shape-check the bare model."""
        __a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowercase )

    def lowerCAmelCase__(self ):
        """Shape-check the model used as a decoder."""
        __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*_lowercase )

    def lowerCAmelCase__(self ):
        """Decoder check with a default (None) encoder attention mask."""
        # NOTE(review): nine-way unpack collapsed into a single `__a` by the
        # rename — originally config, input_ids, ..., encoder_attention_mask.
        (
            (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) , (
                __a
            ) ,
        ) : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        __a : Optional[Any] = None
        self.model_tester.create_and_check_model_as_decoder(
            _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )

    def lowerCAmelCase__(self ):
        """Shape-check the masked-LM head."""
        __a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_lowercase )

    def lowerCAmelCase__(self ):
        """Shape-check the multiple-choice head."""
        __a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*_lowercase )

    def lowerCAmelCase__(self ):
        """Shape-check the next-sentence-prediction head."""
        __a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*_lowercase )

    def lowerCAmelCase__(self ):
        """Shape-check the pretraining head."""
        __a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*_lowercase )

    def lowerCAmelCase__(self ):
        """Shape-check the question-answering head."""
        __a : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_lowercase )

    def lowerCAmelCase__(self ):
        """Shape-check the sequence-classification head."""
        __a : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_lowercase )

    def lowerCAmelCase__(self ):
        """Shape-check the token-classification head."""
        __a : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_lowercase )

    @slow
    def lowerCAmelCase__(self ):
        """Loading each published checkpoint succeeds."""
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            # NOTE(review): `_lowercase` is undefined here — should be
            # `model_name` from the loop above.
            __a : Union[str, Any] = NezhaModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )

    @slow
    @require_torch_gpu
    def lowerCAmelCase__(self ):
        """Torchscript round-trip: trace on CPU, save/load, run on the target device."""
        __a , __a : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            __a : int = True
            __a : Dict = model_class(config=_lowercase )
            __a : Any = self._prepare_for_class(_lowercase , _lowercase )
            __a : Dict = torch.jit.trace(
                _lowercase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(_lowercase , os.path.join(_lowercase , """bert.pt""" ) )
                __a : List[Any] = torch.jit.load(os.path.join(_lowercase , """bert.pt""" ) , map_location=_lowercase )
                loaded(inputs_dict["""input_ids"""].to(_lowercase ) , inputs_dict["""attention_mask"""].to(_lowercase ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests against the pretrained ``sijunhe/nezha-cn-base`` checkpoint.

    Fixes from review: both test methods were named ``lowerCAmelCase__`` (the
    second definition shadowed the first, and neither starts with ``test`` so
    unittest discovered nothing), and forwards/assertions referenced the
    undefined name ``_lowercase`` instead of the local tensors.
    """

    @slow
    def test_inference_nezha_model(self ):
        """Hidden states of the base model match recorded reference values."""
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self ):
        """Masked-LM logits of the pretrained head match recorded reference values."""
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 63 |
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Unit tests for the 0/1 knapsack solver ``knapsack.knapsack`` (imported as ``k``).

    Fixes from review: all three test methods were named ``lowerCAmelCase__``
    (later definitions shadowed earlier ones and unittest discovers only
    ``test``-prefixed names, so nothing ran); the locals had all collapsed to
    one name ``__a`` and the solver was called with the undefined
    ``_lowercase``.
    """

    def test_base_case(self ):
        """Zero capacity — the best achievable value is always 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self ):
        """Small instance: best achievable value is 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self ):
        """Classic textbook instance: best achievable value is 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 63 | 1 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] ):
__a : List[str] = 0
__a : Tuple = len(_lowerCamelCase ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__a : str = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(_lowerCamelCase ):
return None
__a : List[Any] = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
__a : List[Any] = left
__a : Any = point
elif point > right:
__a : List[Any] = right
__a : Tuple = point
else:
if item < current_item:
__a : int = point - 1
else:
__a : str = point + 1
return None
def __magic_name__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : int ):
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
__a : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(_lowerCamelCase ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
elif point > right:
return interpolation_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , point - 1 )
else:
return interpolation_search_by_recursion(
_lowerCamelCase , _lowerCamelCase , point + 1 , _lowerCamelCase )
def __magic_name__ ( _lowerCamelCase : Optional[Any] ):
if collection != sorted(_lowerCamelCase ):
raise ValueError("""Collection must be ascending sorted""" )
return True
if __name__ == "__main__":
    import sys

    # NOTE(review): this driver is broken by a mass-rename — every assignment
    # targets the single name `lowercase__`, while the code below still reads
    # `debug`, `collection`, `target` and `result`, and it calls
    # `interpolation_search` / `__assert_sorted`, which are not defined in this
    # module (the functions above are all named `__magic_name__`).  The
    # original variable and function names need restoring before this runs.
    lowercase__ = 0
    if debug == 1:
        lowercase__ = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    lowercase__ = 67
    lowercase__ = interpolation_search(collection, target)
    if result is not None:
        print(f'{target} found at positions: {result}')
    else:
        print("Not found")
| 63 |
"""simple docstring"""
from manim import *
class SCREAMING_SNAKE_CASE__ ( __snake_case ):
    """Manim scene animating checkpoint weights being laid out across CPU,
    GPU, model slots and disk (the big-model-inference loading story).

    NOTE(review): the positional `arrange`/`next_to` direction arguments were
    all collapsed to the undefined name ``_lowercase`` by a mechanical rename —
    originally manim direction constants (RIGHT/DOWN/LEFT/UP) — and many
    ``VGroup``/``Group`` calls receive ``_lowercase`` where previously built
    mobjects were meant.  The original names need restoring before this scene
    can render; the comments below describe the evident layout intent.
    """

    def lowerCAmelCase__(self ):
        """Construct-style entry point: build the memory-cell groups and play the animation."""
        # Basic cell shapes: full-size cell, meta (disk) cell, and fill overlay.
        __a : List[str] = Rectangle(height=0.5 , width=0.5 )
        __a : Union[str, Any] = Rectangle(height=0.25 , width=0.25 )
        __a : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six cells with a label.
        __a : Dict = [mem.copy() for i in range(6 )]
        __a : str = [mem.copy() for i in range(6 )]
        __a : Tuple = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[Any] = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Union[str, Any] = Text("""CPU""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(_lowercase )
        # GPU: a single row of four cells.
        __a : Optional[Any] = [mem.copy() for i in range(4 )]
        __a : Dict = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = Text("""GPU""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        gpu.move_to([-1, -1, 0] )
        self.add(_lowercase )
        # Model: a row of six cells.
        __a : List[Any] = [mem.copy() for i in range(6 )]
        __a : Any = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Optional[Any] = Text("""Model""" , font_size=24 )
        __a : Any = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        model.move_to([3, -1.0, 0] )
        self.add(_lowercase )
        # Fill targets marking where each model cell lives on the CPU.
        __a : Tuple = []
        __a : Tuple = []
        __a : Optional[int] = []
        for i, rect in enumerate(_lowercase ):
            rect.set_stroke(_lowercase )
            __a : str = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowercase , opacity=0.7 )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowercase )
                cpu_target.set_x(cpu_target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0] , direction=_lowercase , buff=0.0 )
            else:
                cpu_target.next_to(model_cpu_arr[i - 1] , direction=_lowercase , buff=0.0 )
            self.add(_lowercase )
            model_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase , *_lowercase )
        # Loaded checkpoint: a row of six cells above the model.
        __a : Optional[Any] = [mem.copy() for i in range(6 )]
        __a : Union[str, Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Any = Text("""Loaded Checkpoint""" , font_size=24 )
        __a : str = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        checkpoint.move_to([3, 0.5, 0] )
        self.add(_lowercase )
        # Fill overlays for the checkpoint cells plus their CPU destinations.
        __a : Dict = []
        __a : int = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = fill.copy().set_fill(_lowercase , opacity=0.7 )
            target.move_to(_lowercase )
            ckpt_arr.append(_lowercase )
            __a : Union[str, Any] = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1] )
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5] )
            ckpt_cpu_arr.append(_lowercase )
        self.add(*_lowercase , *_lowercase )
        # Legend ("Key") explaining the colour coding.
        __a : List[str] = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __a : List[Any] = MarkupText(
            F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(_lowercase , _lowercase )
        __a : str = MarkupText(
            F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
        blue_text.next_to(_lowercase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(_lowercase )
        # Caption for the first animation step.
        __a : Optional[int] = MarkupText(
            F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        # Disk: two columns of six meta-sized cells.
        __a : List[Any] = [meta_mem.copy() for i in range(6 )]
        __a : Optional[int] = [meta_mem.copy() for i in range(6 )]
        __a : List[Any] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : List[str] = VGroup(*_lowercase ).arrange(_lowercase , buff=0 )
        __a : Tuple = VGroup(_lowercase , _lowercase ).arrange(_lowercase , buff=0 )
        __a : Dict = Text("""Disk""" , font_size=24 )
        __a : Dict = Group(_lowercase , _lowercase ).arrange(_lowercase , buff=0.5 , aligned_edge=_lowercase )
        disk.move_to([-4.0, -1.25, 0] )
        self.play(Write(_lowercase , run_time=3 ) , Write(_lowercase , run_time=1 ) , Create(_lowercase , run_time=1 ) )
        # Shrink each checkpoint cell onto its disk slot.
        __a : Optional[Any] = []
        for i, rect in enumerate(_lowercase ):
            __a : List[str] = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
            animations.append(MoveToTarget(_lowercase , run_time=1.5 ) )
        self.play(*_lowercase )
        self.play(FadeOut(_lowercase ) )
        # Second caption: the checkpoint is garbage-collected.
        __a : List[str] = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(_lowercase , run_time=3 ) )
        self.play(
            FadeOut(_lowercase , _lowercase , *_lowercase , *_lowercase ) , )
        self.wait()
| 63 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( __snake_case , unittest.TestCase ):
    """Fast tests of ``LDMTextToImagePipeline`` using tiny dummy components.

    NOTE(review): the five class-level assignments below all bind the same
    name ``_lowerCAmelCase`` — each shadows the previous, so only the final
    ``False`` survives.  Originally these were distinct attributes
    (``pipeline_class``, ``params``, ``required_optional_params``,
    ``batch_params``, ...) — TODO restore.  Several call sites also reference
    the undefined ``_lowercase`` (rename artifact), flagged below.
    """

    _lowerCAmelCase = LDMTextToImagePipeline
    _lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    _lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    _lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
    _lowerCAmelCase = False

    def lowerCAmelCase__(self ):
        """Build the tiny UNet / DDIM scheduler / VAE / CLIP text encoder components."""
        torch.manual_seed(0 )
        __a : Tuple = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        # NOTE(review): `_lowercase` is undefined here — presumably the boolean
        # flags for `clip_sample` / `set_alpha_to_one`; confirm against history.
        __a : int = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
        torch.manual_seed(0 )
        __a : List[str] = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , latent_channels=4 , )
        torch.manual_seed(0 )
        __a : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        __a : Optional[Any] = CLIPTextModel(_lowercase )
        __a : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        __a : List[str] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vqvae""": vae,
            """bert""": text_encoder,
            """tokenizer""": tokenizer,
        }
        return components

    def lowerCAmelCase__(self , _lowercase , _lowercase=0 ):
        """Build deterministic pipeline kwargs (seeded generator) for the given device."""
        if str(_lowercase ).startswith("""mps""" ):
            # mps does not support device-bound generators; seed globally instead.
            __a : Union[str, Any] = torch.manual_seed(_lowercase )
        else:
            __a : Any = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
        __a : Dict = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
        }
        return inputs

    def lowerCAmelCase__(self ):
        """Two-step CPU run of the dummy pipeline matches the recorded image slice."""
        __a : Optional[int] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __a : List[str] = self.get_dummy_components()
        __a : Union[str, Any] = LDMTextToImagePipeline(**_lowercase )
        pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __a : Dict = self.get_dummy_inputs(_lowercase )
        __a : List[Any] = pipe(**_lowercase ).images
        __a : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 16, 16, 3)
        __a : Union[str, Any] = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Slow GPU integration tests for the pretrained 256x256 LDM text-to-image pipeline.

    NOTE(review): method names ``tearDown``/``get_inputs`` are grounded by
    ``super().tearDown()`` and the call site ``self.get_inputs(...)``; the test
    method name was reconstructed from the upstream diffusers test suite.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic pipeline kwargs with pre-sampled latents.

        NOTE(review): the transformed signature declared ``_lowercase`` three
        times (a SyntaxError) and used the nonexistent ``torch.floataa``;
        restored to ``device, dtype=torch.float32, seed=0`` per upstream.
        """
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        # NOTE(review): ``torch_device`` comes from diffusers' testing utilities
        # imported earlier in the full file — confirm against the import block.
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Nightly GPU tests: full 50-step DDIM run compared against a reference image.

    NOTE(review): method names ``tearDown``/``get_inputs`` are grounded by
    ``super().tearDown()`` and the call site ``self.get_inputs(...)``; the test
    method name was reconstructed from the upstream diffusers test suite.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        """Deterministic pipeline kwargs with pre-sampled latents (50 inference steps).

        NOTE(review): duplicate ``_lowercase`` parameters (SyntaxError) and the
        nonexistent ``torch.floataa`` restored per the upstream diffusers test.
        """
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        # NOTE(review): ``torch_device`` comes from diffusers' testing utilities
        # imported earlier in the full file — confirm against the import block.
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 63 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : int , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float(moles / volume ) * nfactor )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((moles * 0.08_21 * temperature) / (volume) ) )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((moles * 0.08_21 * temperature) / (pressure) ) )
def __magic_name__ ( _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ):
return round(float((pressure * volume) / (0.08_21 * moles) ) )
if __name__ == "__main__":
    # Run the module's embedded doctests when executed directly.
    import doctest

    doctest.testmod()
| 63 | 1 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def __magic_name__ ( _lowerCamelCase : Tuple ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4_e00 and cp <= 0x9_fff)
or (cp >= 0x3_400 and cp <= 0x4_dbf) #
or (cp >= 0x20_000 and cp <= 0x2a_6df) #
or (cp >= 0x2a_700 and cp <= 0x2b_73f) #
or (cp >= 0x2b_740 and cp <= 0x2b_81f) #
or (cp >= 0x2b_820 and cp <= 0x2c_eaf) #
or (cp >= 0xf_900 and cp <= 0xf_aff)
or (cp >= 0x2f_800 and cp <= 0x2f_a1f) #
): #
return True
return False
def is_chinese(word: str) -> int:
    """Return 1 if every character of ``word`` is a CJK character, else 0.

    NOTE(review): def renamed from ``__magic_name__`` to match the surviving
    call sites ``is_chinese(...)``; the body read ``word`` while the parameter
    had been renamed ``_lowerCamelCase`` (a NameError).
    """
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    """Collect the multi-character all-CJK tokens from ``tokens`` as a list.

    NOTE(review): def renamed from ``__magic_name__`` to match the surviving
    call site ``get_chinese_word(r)``; locals restored from the surviving
    references ``chinese_word``/``word_set``/``word_list``.
    """
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    """Prefix "##" onto BERT subtokens that continue a segmented Chinese word.

    Greedily matches the longest word from ``chinese_word_set`` starting at each
    CJK token and marks its non-initial characters, in place.

    NOTE(review): the transformed signature declared ``_lowerCamelCase`` twice
    (a SyntaxError); def name restored from the call site
    ``add_sub_symbol(input_tokens, chinese_word)``.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # Longest candidate word that still fits in the remaining tokens.
            max_match_len = min(end - start, max_word_len)
            for i in range(max_match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    """For each line, list the positions of BERT subtokens that continue a Chinese word.

    NOTE(review): the transformed signature declared ``_lowerCamelCase`` three
    times (a SyntaxError); def name restored from the call site
    ``prepare_ref(...)`` in ``main``. The boolean kwargs below
    (``add_special_tokens=True, truncation=True``) were lost in the transform
    and restored per the upstream script — confirm.
    """
    ltp_res = []
    # LTP word segmentation, processed in batches of 100 lines.
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read the raw corpus, compute whole-word-mask reference positions, write them as JSON lines.

    NOTE(review): def renamed from ``__magic_name__`` to match the call site
    ``main(args)`` in the ``__main__`` guard; the body already read ``args``
    while the parameter had been renamed (a NameError).
    """
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # NOTE(review): the transform assigned both the parser and the parsed args
    # to ``lowercase__`` while the code below read ``parser``/``args`` — a
    # NameError. Names restored from those surviving references.
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    args = parser.parse_args()
    main(args)
| 63 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
    # Run the module's embedded doctests when executed directly.
    import doctest

    doctest.testmod()
| 63 | 1 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    """Hyperparameters and helpers shared by the DeformableDetr image-processing tests.

    NOTE(review): class and method names restored from surviving call sites
    (``DeformableDetrImageProcessingTester(self)``,
    ``prepare_image_processor_dict()``, ``get_expected_values(...)``); the
    transformed ``__init__`` declared every parameter as ``_lowercase``
    (a SyntaxError) and assigned attributes to ``__a``.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default size mirrors the processor's own default.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should output.

        Mirrors shortest-edge resizing; for batched inputs returns the maximum
        height/width across the batch (i.e. the padded size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Unit tests for DeformableDetrImageProcessor (resize/normalize/pad, COCO annotations).

    NOTE(review): attribute/method names restored from surviving references
    (``self.image_processing_class``, ``self.image_processor_tester``,
    ``self.image_processor_dict``) and the upstream transformers test file; the
    transformed source declared duplicate ``_lowercase`` parameters (a
    SyntaxError) and assigned every local to ``__a``. The base class
    ``__snake_case`` is ``ImageProcessingSavingTestMixin`` per the import block.
    """

    # Processor class under test; None when vision dependencies are unavailable.
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        # Intentionally empty: batch-feature behavior is covered elsewhere.
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 63 |
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Apply a zero-mean Gaussian of the given variance to each element of ``img``.

    NOTE(review): the transformed signature declared ``_lowerCamelCase`` twice
    (a SyntaxError); def name restored from the surviving call sites
    ``vec_gaussian(...)`` below.
    """
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the square ``kernel_size`` window of ``img`` centred at (x, y).

    NOTE(review): the transformed signature declared ``_lowerCamelCase`` four
    times (a SyntaxError); names restored from the body and the call site
    ``get_slice(img, i, j, kernel_size)``.
    """
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Build the spatial Gaussian weighting kernel of the given size/variance.

    NOTE(review): the transformed signature declared ``_lowerCamelCase`` twice
    (a SyntaxError); def name restored from the call site
    ``get_gauss_kernel(kernel_size, spatial_variance)``.
    """
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            # Euclidean distance of (i, j) from the kernel centre.
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Edge-preserving bilateral filter: weight each window pixel by spatial
    distance (Gaussian kernel) and by intensity difference from the centre.

    Border pixels (within kernel_size // 2 of an edge) are left at zero.

    NOTE(review): the transformed signature declared ``_lowerCamelCase`` four
    times (a SyntaxError); def name and argument order restored from the call
    site ``bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)``.
    """
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the window centre.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga
def parse_args(args: list) -> tuple:
    """Parse ``sys.argv``-style arguments for the bilateral filter demo.

    Positional args: [1] filename, [2] spatial variance, [3] intensity
    variance, [4] kernel size (forced odd). Missing args get defaults.

    NOTE(review): def renamed from ``__magic_name__`` to match the call site
    ``parse_args(sys.argv)``; the body already read ``args`` while the
    parameter had been renamed (a NameError).
    """
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # NOTE(review): locals restored from their surviving uses below; the
    # transform assigned everything to ``lowercase__``. ``cva`` is this file's
    # (transformed) import name for OpenCV; ``np.uinta`` was the nonexistent
    # transform of ``np.uint8`` and is fixed here.
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cva.imshow("output image", out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 63 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowercase__ = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__(BenchmarkArguments):
    """PyTorch-specific benchmark arguments: device/TPU setup, torchscript, AMP level.

    NOTE(review): field/property names restored from surviving references
    (``self.deprecated_args``, ``self._setup_devices``, ``self.n_gpu``, the
    ``"fp16_opt_level"`` kwarg) and the upstream transformers source; the base
    class ``__snake_case`` is ``BenchmarkArguments`` per the import block. The
    warning message's "depreciated" typo is fixed to "deprecated".
    """

    # Legacy negated CLI flags mapped onto their positive counterparts.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` kwargs into their positive fields.

        Pops each deprecated key, sets the inverted value on ``self`` with a
        warning, then delegates the remaining kwargs to the dataclass init.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the "no_" prefix
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        """Resolve the benchmark device and GPU count once (result is cached)."""
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
| 63 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """Build a tiny 3-row dataset fixture for the deduplication tests.

    Two files repeat "a " (near-duplicates); one repeats "b " (distinct).

    NOTE(review): def renamed from ``__magic_name__`` to match the surviving
    call sites ``get_dataset()``; the ``Dataset.from_dict`` argument was the
    undefined ``_lowerCamelCase`` (a NameError) and is restored to the dict.
    """
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class SCREAMING_SNAKE_CASE__(TestCase):
    """Tests for MinHash-based near-duplicate detection and removal.

    NOTE(review): the base class ``__snake_case`` is ``TestCase`` per the
    import block; test-method names were reconstructed (originals were lost in
    the transform) and broken locals (``_lowercase``) restored.
    """

    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        # The two "a"-repeat files are near-duplicates of each other.
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        # One of the two duplicates is dropped from the dataset.
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 63 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.