| code (string, 81-54k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 81 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
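# "\u0120" is the byte-level BPE marker for a leading space, so "\u0120low" is the word-initial
# token " low"; this tiny vocabulary keeps the tests hermetic (no pretrained files needed).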
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
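# Merge rules are applied in rank order: "\u0120 l" -> "\u0120l", then "\u0120l o" -> "\u0120lo",
# then "\u0120lo w" -> "\u0120low", and "e r" -> "er"; "#version: 0.2" is the standard merges-file header line.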
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = "lower newer"
lowercase__ : Optional[Any] = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
lowercase__ : Dict = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokens + [tokenizer.unk_token]
lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
# Testing tokenization
lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level tokenizers
# and to get both GPT-2 and RoBERTa to work at the same time (mostly an issue of adding a space before the string).
pass
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : Dict = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Any ):
lowercase__ : Any = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class OPTTokenizationTest(unittest.TestCase):
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 1 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
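    # Expected array rank for each voice-preset component: the semantic prompt is a 1-D token
    # sequence, while the coarse and fine acoustic prompts are 2-D codebook arrays.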
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def snake_case ( cls : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple="speaker_embeddings_path.json" , **SCREAMING_SNAKE_CASE : Dict ):
if speaker_embeddings_dict_path is not None:
lowercase__ : List[str] = get_file_from_repo(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , subfolder=kwargs.pop("subfolder" , SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("cache_dir" , SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("force_download" , SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("proxies" , SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("resume_download" , SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("local_files_only" , SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("use_auth_token" , SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("revision" , SCREAMING_SNAKE_CASE ) , )
if speaker_embeddings_path is None:
logger.warning(
f"""`{os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}` does not exist;
no preloaded speaker embeddings will be used. Make sure to provide a correct path to the JSON
dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.""" )
lowercase__ : Optional[int] = None
else:
with open(SCREAMING_SNAKE_CASE ) as speaker_embeddings_json:
lowercase__ : Tuple = json.load(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = None
lowercase__ : Tuple = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return cls(tokenizer=SCREAMING_SNAKE_CASE , speaker_embeddings=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any]="speaker_embeddings_path.json" , SCREAMING_SNAKE_CASE : str="speaker_embeddings" , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : Tuple , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "v2" ) , exist_ok=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = {}
lowercase__ : Tuple = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
lowercase__ : Dict = self._load_voice_preset(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"] , SCREAMING_SNAKE_CASE , f"""{prompt_key}_{key}""" ) , voice_preset[key] , allow_pickle=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = os.path.join(SCREAMING_SNAKE_CASE , f"""{prompt_key}_{key}.npy""" )
lowercase__ : Tuple = tmp_dict
with open(os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , "w" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
super().save_pretrained(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def _load_voice_preset(self, voice_preset=None, **kwargs):
lowercase__ : List[str] = self.speaker_embeddings[voice_preset]
lowercase__ : Optional[int] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f"""Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].""" )
lowercase__ : Tuple = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , SCREAMING_SNAKE_CASE ) , cache_dir=kwargs.pop("cache_dir" , SCREAMING_SNAKE_CASE ) , force_download=kwargs.pop("force_download" , SCREAMING_SNAKE_CASE ) , proxies=kwargs.pop("proxies" , SCREAMING_SNAKE_CASE ) , resume_download=kwargs.pop("resume_download" , SCREAMING_SNAKE_CASE ) , local_files_only=kwargs.pop("local_files_only" , SCREAMING_SNAKE_CASE ) , use_auth_token=kwargs.pop("use_auth_token" , SCREAMING_SNAKE_CASE ) , revision=kwargs.pop("revision" , SCREAMING_SNAKE_CASE ) , )
if path is None:
raise ValueError(
f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist;
no preloaded voice preset will be used. Make sure to provide correct paths to the {voice_preset}
embeddings.""" )
lowercase__ : Optional[int] = np.load(SCREAMING_SNAKE_CASE )
return voice_preset_dict
def _validate_voice_preset_dict(self, voice_preset=None):
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f"""Voice preset unrecognized, missing {key} as a key.""" )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f"""{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.""" )
def __call__( self : Any , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Dict="pt" , SCREAMING_SNAKE_CASE : str=256 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : List[str]=False , **SCREAMING_SNAKE_CASE : List[str] , ):
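# Resolve `voice_preset` first (from the stored speaker-embeddings dict, or from an `.npz` file on
# disk), validate its arrays, then tokenize the text with fixed max_length padding and attach the
# preset to the returned BatchFeature.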
if voice_preset is not None and not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
lowercase__ : Dict = self._load_voice_preset(SCREAMING_SNAKE_CASE )
else:
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not voice_preset.endswith(".npz" ):
lowercase__ : Tuple = voice_preset + ".npz"
lowercase__ : Dict = np.load(SCREAMING_SNAKE_CASE )
if voice_preset is not None:
self._validate_voice_preset_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.tokenizer(
SCREAMING_SNAKE_CASE , return_tensors=SCREAMING_SNAKE_CASE , padding="max_length" , max_length=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , return_token_type_ids=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
if voice_preset is not None:
lowercase__ : int = voice_preset
return encoded_text
| 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
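# Lazy-import pattern: replacing the module in sys.modules with a _LazyModule keeps
# `import transformers` fast; the torch-backed classes above are only imported on first attribute access.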
| 81 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def snake_case ( self : Dict ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : Tuple = self.get_feature_extractor()
lowercase__ : Optional[Any] = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_feature_extractor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Optional[Any] = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.get_feature_extractor()
lowercase__ : str = self.get_tokenizer()
lowercase__ : Any = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_list((3, 1_000) )
lowercase__ : Dict = feature_extractor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : int = processor(audios=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = self.get_feature_extractor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Union[str, Any] = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "This is a test string"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : str = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Any ):
lowercase__ : Optional[int] = self.get_feature_extractor()
lowercase__ : int = self.get_tokenizer()
lowercase__ : List[Any] = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Union[str, Any] = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Dict = self.get_feature_extractor()
lowercase__ : List[Any] = self.get_tokenizer()
lowercase__ : Optional[Any] = ClapProcessor(tokenizer=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
| 81 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
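# e.g. with image_size=30 and patch_size=2: 15**2 = 225 patches, and with mask_ratio=0.6 the
# encoder keeps ceil(0.4 * (225 + 1)) = 91 of the 226 tokens (patches + [CLS]).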
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}
    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def test_save_load(self):
    # make mask reproducible
    np.random.seed(2)
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    num_patches = int((config.image_size // config.patch_size) ** 2)
    noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
    for model_class in self.all_model_classes:
        model = model_class(config)
        model_inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(model_inputs, noise=noise)
        if model_class.__name__ == "TFViTMAEModel":
            out_2 = outputs.last_hidden_state.numpy()
            out_2[np.isnan(out_2)] = 0
        else:
            out_2 = outputs.logits.numpy()
            out_2[np.isnan(out_2)] = 0
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, saved_model=False)
            model = model_class.from_pretrained(tmpdirname)
            after_outputs = model(model_inputs, noise=noise)
            if model_class.__name__ == "TFViTMAEModel":
                out_1 = after_outputs["last_hidden_state"].numpy()
                out_1[np.isnan(out_1)] = 0
            else:
                out_1 = after_outputs["logits"].numpy()
                out_1[np.isnan(out_1)] = 0
            # compare outputs before and after the save/reload round-trip
            max_diff = np.amax(np.abs(out_1 - out_2))
            self.assertLessEqual(max_diff, 1e-5)
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
| 81 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : Optional[int] = emb_layer_norm_before
lowercase__ : List[str] = token_dropout
lowercase__ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : List[str] = get_default_vocab_list()
else:
lowercase__ : List[Any] = vocab_list
else:
lowercase__ : List[Any] = None
lowercase__ : List[str] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)
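    # `trunk` may be passed as a plain dict (e.g. after a JSON round-trip), so __post_init__ above
    # coerces it back into a TrunkConfig.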
    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}.")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}.")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")
    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 3_8_4
lowercase_ = 1_2_8
lowercase_ = 1_6
lowercase_ = 1_2_8
lowercase_ = 1_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 1_0
lowercase_ = 1e-8
lowercase_ = 1e5
    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
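# The tuple above is the 33-symbol ESM-2 alphabet: 4 special tokens, 25 amino-acid/ambiguity codes,
# the "." and "-" gap markers, and the <null_1> and <mask> tokens.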
| 81 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DEFORMABLE_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
    # See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class DeformableDetrConfig(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=300 , max_position_embeddings=1_024 , encoder_layers=6 , encoder_ffn_dim=1_024 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=1_024 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , return_intermediate=True , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , num_feature_levels=4 , encoder_n_points=4 , decoder_n_points=4 , two_stage=False , two_stage_num_proposals=300 , with_box_refine=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , focal_alpha=0.25 , disable_custom_kernels=False , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        self.disable_custom_kernels = disable_custom_kernels
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def num_attention_heads(self ):
        return self.encoder_attention_heads
    @property
    def hidden_size(self ):
        return self.d_model
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
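# A minimal usage sketch of the upstream DeformableDetrConfig that this class
# mirrors (the keyword names below come from that upstream API, not from any
# placeholder names above):
from transformers import DeformableDetrConfig

config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
assert config.model_type == "deformable_detr"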
| 81 | 1 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
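# A standalone sketch of the guarded-import pattern used throughout this
# module ("my_backend" is a hypothetical stand-in for torch/flax/scipy/torchsde):
import importlib.util

def is_my_backend_available() -> bool:
    # find_spec returns None when the package cannot be imported
    return importlib.util.find_spec("my_backend") is not None

if is_my_backend_available():
    import my_backend  # the real schedulers would be imported here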
| 81 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
    def __init__( self , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_pad : bool = True , pad_size : int = 8 , **kwargs , ):
        super().__init__(**kwargs )
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size
    def rescale( self , image : np.ndarray , scale : float , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def pad( self , image : np.ndarray , size : int , data_format : Optional[Union[str, ChannelDimension]] = None ):
        # pad height and width up to the next multiple of `size`
        old_height , old_width = get_image_size(image )
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=data_format )
    def preprocess( self , images : ImageInput , do_rescale : Optional[bool] = None , rescale_factor : Optional[float] = None , do_pad : Optional[bool] = None , pad_size : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Union[str, ChannelDimension] = ChannelDimension.FIRST , **kwargs , ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_pad:
            images = [self.pad(image , size=pad_size ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
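# A quick standalone check of the pad-to-multiple arithmetic used in pad()
# above (the concrete sizes are arbitrary examples):
import numpy as np

size = 8
old_height, old_width = 17, 21
pad_height = (old_height // size + 1) * size - old_height
pad_width = (old_width // size + 1) * size - old_width
padded = np.pad(np.zeros((old_height, old_width)), ((0, pad_height), (0, pad_width)), mode="symmetric")
assert padded.shape == (24, 24)  # both dimensions rounded up to the next multiple of 8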
| 81 | 1 |
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class snake_case__:
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Tuple=32 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=10 , SCREAMING_SNAKE_CASE : Optional[int]=[8, 16, 32, 64] , SCREAMING_SNAKE_CASE : int=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : str="relu" , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : int=[2, 3, 4] , SCREAMING_SNAKE_CASE : Tuple=1 , ):
lowercase__ : Any = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = num_channels
lowercase__ : List[str] = embeddings_size
lowercase__ : Any = hidden_sizes
lowercase__ : Any = depths
lowercase__ : Optional[int] = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : Tuple = hidden_act
lowercase__ : Optional[Any] = num_labels
lowercase__ : Optional[Any] = scope
lowercase__ : Dict = len(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = out_features
lowercase__ : List[str] = out_indices
lowercase__ : int = num_groups
def snake_case ( self : List[str] ):
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : str = self.get_config()
return config, pixel_values, labels
def snake_case ( self : str ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Union[str, Any] = BitModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Optional[Any] = self.num_labels
lowercase__ : Optional[int] = BitForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = BitBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : Dict = BitBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Any ):
lowercase__ : Any = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": BitModel, """image-classification""": BitForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict ):
lowercase__ : Union[str, Any] = BitModelTester(self )
lowercase__ : Any = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : Tuple ):
return
@unittest.skip(reason="Bit does not output attentions" )
def snake_case ( self : Optional[int] ):
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def snake_case ( self : int ):
pass
def snake_case ( self : Dict ):
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : List[Any] = [*signature.parameters.keys()]
lowercase__ : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : int = model_class(config=SCREAMING_SNAKE_CASE )
for name, module in model.named_modules():
if isinstance(SCREAMING_SNAKE_CASE , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : List[Any] = layer_type
lowercase__ : Any = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = BitModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
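# The helper below loads the standard two-cat COCO fixture image that ships
# with the transformers test suite.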
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self ):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([[-0.6_526, -0.5_263, -1.4_398]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@require_torch
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (BitBackbone,) if is_torch_available() else ()
lowercase_ = BitConfig
lowercase_ = False
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = BitModelTester(self )
| 81 |
import argparse
import json
from tqdm import tqdm
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path" , type=str , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
    parser.add_argument(
        "--evaluation_set" , type=str , help="where to store parsed evaluation_set file" , )
    parser.add_argument(
        "--gold_data_path" , type=str , help="where to store parsed gold_data_path file" , )
    args = parser.parse_args()
    with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
        args.gold_data_path , "w" ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record["question"]
            titles = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n" )
            gold_file.write("\t".join(titles ) + "\n" )
if __name__ == "__main__":
main()
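# Example invocation (the script name and output paths are placeholders;
# the flags match the parser defined above):
# python parse_dpr_data.py --src_path biencoder-nq-dev.json \
#     --evaluation_set eval_questions.txt --gold_data_path gold_titles.tsv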
| 81 | 1 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start , visited , sort ):
    """Depth-first ordering of the DAG reachable from ``start``."""
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a" , [] , [] )
    print(sort )
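    # sanity check: a vertex is appended only after all of its neighbors,
    # so for every edge u -> v, v must precede u in the result
    for u, vs in edges.items():
        for v in vs:
            assert sort.index(v) < sort.index(u)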
| 81 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
    parser.add_argument(
        "--dataset_name" , type=str , default="wikitext" , help="Name of the dataset to prepare. Explore datasets at: hf.co/datasets." , )
    parser.add_argument(
        "--dataset_config" , type=str , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
    parser.add_argument(
        "--tokenizer_name_or_path" , type=str , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
    parser.add_argument(
        "--shard_size" , type=int , default=1_000 , help="Number of entries to go in a single shard." , )
    parser.add_argument("--split" , type=str , default="train" , choices=["train", "test", "validation"] )
    parser.add_argument(
        "--limit" , default=None , type=int , help="Limit the number of shards (used for debugging)." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8." , )
    parser.add_argument(
        "--output_dir" , default="tf-tpu" , type=str , help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket." , )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer ):
    """Return a closure that tokenizes the ``text`` column of a batch."""
    def fn(examples ):
        return tokenizer(examples["text"] )
    return fn
def get_serialized_examples(tokenized_data ):
    """Serialize tokenized samples into ``tf.train.Example`` records."""
    records = []
    for i in range(len(tokenized_data["input_ids"] ) ):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i] ) ),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        record_bytes = example.SerializeToString()
        records.append(record_bytes )
    return records
def main(args ):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F"""Limiting the dataset to {args.limit} entries.""" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
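    # e.g. with max_length=512, a batch whose concatenated length is 1_300 tokens
    # keeps (1_300 // 512) * 512 = 1_024 tokens (two full chunks) and drops the
    # 276-token remainder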
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"] )
        filename = os.path.join(split_dir , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print("Wrote file {} containing {} records".format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
        print(F"""Total {args.split} records: {total_records}""" , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
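# Example invocation (the script name is a placeholder; the flags match the
# parser defined above):
# python prepare_tfrecord_shards.py --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#     --shard_size 1000 --split train --output_dir tf-tpu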
| 81 | 1 |
from math import factorial
class Dual:
    """A dual number with an arbitrary number of dual components, used for exact automatic differentiation."""
    def __init__( self , real , rank ):
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self ):
        return (
            f"""{self.real}+"""
            f"""{"+".join(str(dual )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )
    def reduce( self ):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ):
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ):
        return self + other * -1
    def __mul__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ):
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ):
        if n < 0 or isinstance(n , float ):
            raise ValueError("power must be a positive integer" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate(func , position , order ):
    """Return the ``order``-th derivative of ``func`` evaluated at ``position``."""
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    def f(y ):
        return y**2 * y**4
    print(differentiate(f, 9, 2) )
| 81 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
        # verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = ConvNextVaModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
if (
model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self ):
        model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
| 81 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """codegen"""
lowercase_ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=50_400 , n_positions=2_048 , n_ctx=2_048 , n_embd=4_096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
    def __init__( self , config : PretrainedConfig , task : str = "default" , patching_specs : List[PatchingSpec] = None , use_past : bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self ):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers(self ):
        return self._config.n_layer
    @property
    def num_attention_heads(self ):
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset(self ):
        return 13
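# Shape sketch for one past_key_values tensor built in generate_dummy_inputs
# above (hypothetical sizes; head_dim = hidden_size // num_heads):
batch, seqlen, n_head, n_embd = 2, 7, 16, 4_096
past_shape = (batch, n_head, seqlen + 2, n_embd // n_head)
assert past_shape == (2, 16, 9, 256)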
| 81 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
    def test_finetune_bertabert(self ):
        bertabert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased" )
        bertabert.config.vocab_size = bertabert.config.encoder.vocab_size
        bertabert.config.eos_token_id = tokenizer.sep_token_id
        bertabert.config.decoder_start_token_id = tokenizer.cls_token_id
        bertabert.config.max_length = 128
        train_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
        val_dataset = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
        train_dataset = train_dataset.select(range(32 ) )
        val_dataset = val_dataset.select(range(16 ) )
        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch ):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"] , padding="max_length" , truncation=True , max_length=512 )
            outputs = tokenizer(batch["highlights"] , padding="max_length" , truncation=True , max_length=128 )
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask
            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
            return batch
        def _compute_metrics(pred ):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
            label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
            accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        train_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs , batched=True , batch_size=batch_size , remove_columns=["article", "highlights"] , )
        val_dataset.set_format(
            type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = SeqaSeqTrainingArguments(
            output_dir=output_dir , per_device_train_batch_size=batch_size , per_device_eval_batch_size=batch_size , predict_with_generate=True , evaluation_strategy="steps" , do_train=True , do_eval=True , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
        # instantiate trainer
        trainer = SeqaSeqTrainer(
            model=bertabert , args=training_args , compute_metrics=_compute_metrics , train_dataset=train_dataset , eval_dataset=val_dataset , tokenizer=tokenizer , )
        # start training
        trainer.train()
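        # trainer.evaluate() would run the eval loop and report the "accuracy"
        # metric computed by _compute_metrics above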
| 81 | 1 |
from importlib import import_module
from .logging import get_logger
lowerCAmelCase__ = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""
    def __init__( self , module , attrs=None ):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__" ):
                    setattr(self , key , getattr(module , key ) )
        self._original_module = module._original_module if isinstance(module , _PatchedModuleObj ) else module
class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""
    _active_patches = []
    def __init__( self , obj , target : str , new , attrs=None ):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split("." )[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__( self ):
        *submodules , target_attr = self.target.split("." )
        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules ) ):
            try:
                submodule = import_module(".".join(submodules[: i + 1] ) )
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj , attr )
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr , _PatchedModuleObj ) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj , attr , _PatchedModuleObj(obj_attr , attrs=self.attrs ) )
                    patched = getattr(self.obj , attr )
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched , key , _PatchedModuleObj(getattr(patched , key , None ) , attrs=self.attrs ) )
                        patched = getattr(patched , key )
                    # finally set the target attribute
                    setattr(patched , target_attr , self.new )
        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules ) ) , target_attr )
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj , attr ) is attr_value:
                    self.original[attr] = getattr(self.obj , attr )
                    setattr(self.obj , attr , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj , target_attr , self.new )
        else:
            raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
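# Usage sketch for `patch_submodule` (illustrative only; `my_module` and `xjoin`
# are hypothetical stand-ins, not defined in this file):
#
#     import my_module  # a module that does `import os` and calls os.path.join
#
#     def xjoin(*parts):
#         return "::".join(parts)
#
#     with patch_submodule(my_module, "os.path.join", xjoin):
#         assert my_module.os.path.join("a", "b") == "a::b"
#     # on exit, the patched submodule objects are removed and the real module is visible again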
| 81 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1_333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1_320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1_536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1_344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"""vit.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
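# Sanity sketch for the fused-QKV split above: a timm checkpoint stores one
# (3 * hidden_size, hidden_size) matrix; query, key and value are consecutive
# row blocks, in that order. Shapes only -- no real weights involved.
#
#     import torch
#
#     hidden_size = 4
#     in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
#     q = in_proj_weight[:hidden_size, :]
#     k = in_proj_weight[hidden_size : hidden_size * 2, :]
#     v = in_proj_weight[-hidden_size:, :]
#     assert q.shape == k.shape == v.shape == (hidden_size, hidden_size)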
def rename_key(name):
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.weight"""] = val[:dim, :]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.weight"""] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.weight"""] = val[-dim:, :]
            else:
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.query.bias"""] = val[:dim]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.key.bias"""] = val[dim : dim * 2]
                orig_state_dict[f"""vit.encoder.layer.{layer_num}.attention.attention.value.bias"""] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights into our YOLOS structure.
    """
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]])
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]])
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]])
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]])
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]])
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]])
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]])
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]])
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]])
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]])
    else:
        raise ValueError(f"""Unknown yolos_name: {yolos_name}""")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
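# Example invocation (a sketch; the script filename and paths are placeholders
# to adapt to your setup):
#
#     python convert_yolos_to_pytorch.py \
#         --yolos_name yolos_s_200_pre \
#         --checkpoint_path ./yolos_s_200_pre.pth \
#         --pytorch_dump_folder_path ./yolos-small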
| 81 | 1 |
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    """
    Quantum full adder: adds `input_1`, `input_2` and `carry_in` on a 4-qubit
    circuit and measures the (sum, carry-out) qubits. An entry of 2 puts the
    corresponding qubit into superposition via a Hadamard gate.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("inputs must be integers.")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")

    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")

    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'''Total sum count for state is: {quantum_full_adder(1, 1, 1)}''')
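# Classical cross-check (a sketch, not part of the original script): for definite
# 0/1 inputs, the measured counts should concentrate on the classical result.
#
#     def classical_full_adder(a: int, b: int, cin: int) -> tuple[int, int]:
#         total = a + b + cin
#         return total % 2, total // 2  # (sum bit, carry-out bit)
#
#     classical_full_adder(1, 1, 1)  # -> (1, 1), i.e. the "11" state counted above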
| 81 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
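# With the lazy structure above, importing the package is cheap: heavy submodules
# are only materialized on first attribute access, e.g. (hypothetical usage):
#
#     from transformers import MgpstrConfig  # triggers the real configuration import
#     config = MgpstrConfig()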
| 81 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    """
    Text-guided inpainting pipeline: CLIPSeg predicts a segmentation mask for the region
    described by `text`, and a Stable Diffusion inpainting pass fills that region according
    to `prompt`.
    """

    def __init__(self, segmentation_model: CLIPSegForImageSegmentation, segmentation_processor: CLIPSegProcessor, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 .")

        self.register_modules(
            segmentation_model=segmentation_model, segmentation_processor=segmentation_processor, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor)
    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], image: Union[torch.FloatTensor, PIL.Image.Image], text: str, height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        # Use CLIPSeg to predict a segmentation mask for the region described by `text`
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt").to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae, text_encoder=self.text_encoder, tokenizer=self.tokenizer, unet=self.unet, scheduler=self.scheduler, safety_checker=self.safety_checker, feature_extractor=self.feature_extractor)
        return inpainting_pipeline(
            prompt=prompt, image=image, mask_image=mask_pil, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps)
| 81 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images for the processor tests."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 81 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = StableDiffusionInpaintPipeline
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowercase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase_ = frozenset([] )
def snake_case ( self : Dict ):
torch.manual_seed(0 )
lowercase__ : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE , )
lowercase__ : List[str] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
lowercase__ : Union[str, Any] = CLIPTextModel(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase__ : List[str] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowercase__ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE ) ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ : int = Image.fromarray(np.uinta(SCREAMING_SNAKE_CASE ) ).convert("RGB" ).resize((64, 64) )
lowercase__ : List[str] = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : str = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def snake_case ( self : str ):
lowercase__ : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : List[Any] = self.get_dummy_components()
lowercase__ : Union[str, Any] = StableDiffusionInpaintPipeline(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = sd_pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ : Union[str, Any] = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Tuple ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Any ):
lowercase__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowercase__ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowercase__ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
lowercase__ : Optional[Any] = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ : List[str] = StableDiffusionInpaintPipeline.from_pretrained(SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : Dict = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ : Optional[Any] = torch.manual_seed(0 )
lowercase__ : Union[str, Any] = pipe(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type="np" , )
lowercase__ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def snake_case ( self : Any ):
lowercase__ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowercase__ : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowercase__ : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
lowercase__ : Any = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ : str = StableDiffusionInpaintPipeline.from_pretrained(
SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , safety_checker=SCREAMING_SNAKE_CASE , )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : Any = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Optional[int] = pipe(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type="np" , )
lowercase__ : Optional[Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case ( self : List[str] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
lowercase__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
lowercase__ : Dict = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ : str = PNDMScheduler.from_pretrained(SCREAMING_SNAKE_CASE , subfolder="scheduler" )
lowercase__ : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(
SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa , )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__ : List[Any] = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : List[Any] = pipe(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="np" , )
lowercase__ : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 81 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : int = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = -1
lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer.decode(greedy_ids[0] )
lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
lowercase__ : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : Optional[Any] = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = -1
lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = ""
for new_text in streamer:
streamer_text += new_text
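# Minimal streaming pattern exercised by the tests above, as a sketch for use
# outside a test (the `model`, `tokenizer` and `input_ids` objects are assumed
# to exist already):
#
#     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
#     generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 50, "streamer": streamer}
#     Thread(target=model.generate, kwargs=generation_kwargs).start()
#     for new_text in streamer:  # yields decoded text chunks as they are generated
#         print(new_text, end="", flush=True)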
| 81 | 1 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    Each state has exactly two children: exclude the current element, or include it.
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
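# Expected behaviour (exclude-first DFS order): for seq = ['A', 'B'] the call
# above prints
#     []
#     ['B']
#     ['A']
#     ['A', 'B']
# i.e. all 2**n subsequences of an n-element sequence.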
| 81 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of the decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default" if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE: maps each latent vector to its nearest
    codebook entry, with optional post-hoc remapping of indices.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
                f"""Using {self.unknown_index} for unknown indices.""")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : str = self.mean + self.std * sample
return x
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
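# KL divergence to the standard normal when `other` is None, otherwise to another diagonal Gaussian.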
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
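# Negative log-likelihood of `sample` under this Gaussian, summed over the given dims.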
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
| 81 | 1 |
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(lowerCamelCase__ ) )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if index == len(lowerCamelCase__ ):
return True
# Recursive Step
for i in range(lowerCamelCase__ ):
if valid_coloring(graph[index] , lowerCamelCase__ , lowerCamelCase__ ):
# Color current vertex
lowercase__ : str = i
# Validate coloring
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , index + 1 ):
return True
# Backtrack
lowercase__ : List[str] = -1
return False
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = [-1] * len(lowerCamelCase__ )
if util_color(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , 0 ):
return colored_vertices
return []
| 81 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
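# Tiny DiT stack (2-layer transformer + default VAE + DDIM scheduler) keeps the pipeline test fast on CPU.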
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 81 | 1 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
lowerCAmelCase__ = get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
with FSDP.state_dict_type(
lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase__ : Any = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase__ : str = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
lowercase__ : int = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if accelerator.process_index == 0:
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase__ : Dict = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase__ : Tuple = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"""Saving model to {output_model_file}""" )
torch.save(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"""Model saved to {output_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase__ : Optional[Any] = os.path.join(lowerCamelCase__ , F"""{MODEL_NAME}_{model_index}""" )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
logger.info(F"""Saving model to {ckpt_dir}""" )
lowercase__ : Optional[Any] = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=lowerCamelCase__ , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Model saved to {ckpt_dir}""" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(lowerCamelCase__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
lowercase__ : Union[str, Any] = F"""{MODEL_NAME}.bin""" if model_index == 0 else F"""{MODEL_NAME}_{model_index}.bin"""
lowercase__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"""Loading model from {input_model_file}""" )
lowercase__ : List[str] = torch.load(lowerCamelCase__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
lowercase__ : Any = (
F"""{MODEL_NAME}_rank{accelerator.process_index}.bin"""
if model_index == 0
else F"""{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"""
)
lowercase__ : List[str] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"""Loading model from {input_model_file}""" )
lowercase__ : Union[str, Any] = torch.load(lowerCamelCase__ )
logger.info(F"""Model loaded from {input_model_file}""" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
lowercase__ : Optional[int] = (
os.path.join(lowerCamelCase__ , F"""{MODEL_NAME}_{model_index}""" )
if F"""{MODEL_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading model from {ckpt_dir}""" )
lowercase__ : int = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=lowerCamelCase__ , storage_reader=dist_cp.FileSystemReader(lowerCamelCase__ ) , planner=DefaultLoadPlanner() , )
lowercase__ : Tuple = state_dict["model"]
logger.info(F"""Model loaded from {ckpt_dir}""" )
model.load_state_dict(lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
with FSDP.state_dict_type(
lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
lowercase__ : Dict = FSDP.optim_state_dict(lowerCamelCase__ , lowerCamelCase__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
lowercase__ : Optional[Any] = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase__ : Optional[int] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"""Saving Optimizer state to {output_optimizer_file}""" )
torch.save(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"""Optimizer state saved in {output_optimizer_file}""" )
else:
lowercase__ : str = os.path.join(lowerCamelCase__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
logger.info(F"""Saving Optimizer state to {ckpt_dir}""" )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(lowerCamelCase__ ) , planner=DefaultSavePlanner() , )
logger.info(F"""Optimizer state saved in {ckpt_dir}""" )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowerCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
lowercase__ : List[str] = None
# The check below should work but currently does not (mostly a PyTorch issue);
# in the meantime it is disabled at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
lowercase__ : Any = (
F"""{OPTIMIZER_NAME}.bin""" if optimizer_index == 0 else F"""{OPTIMIZER_NAME}_{optimizer_index}.bin"""
)
lowercase__ : Any = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
logger.info(F"""Loading Optimizer state from {input_optimizer_file}""" )
lowercase__ : int = torch.load(lowerCamelCase__ )
logger.info(F"""Optimizer state loaded from {input_optimizer_file}""" )
else:
lowercase__ : List[Any] = (
os.path.join(lowerCamelCase__ , F"""{OPTIMIZER_NAME}_{optimizer_index}""" )
if F"""{OPTIMIZER_NAME}""" not in input_dir
else input_dir
)
logger.info(F"""Loading Optimizer from {ckpt_dir}""" )
lowercase__ : Optional[Any] = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(lowerCamelCase__ ) , )
lowercase__ : Dict = optim_state["optimizer"]
logger.info(F"""Optimizer loaded from {ckpt_dir}""" )
lowercase__ : Optional[Any] = FSDP.optim_state_dict_to_load(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
optimizer.load_state_dict(lowerCamelCase__ )
| 81 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = (CMStochasticIterativeScheduler,)
lowercase_ = 1_0
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Optional[int] ):
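# Stepping the scheduler at two consecutive timesteps must preserve the sample's shape.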
lowercase__ : Tuple = 10
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Any = scheduler.timesteps[0]
lowercase__ : Optional[int] = scheduler.timesteps[1]
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = self.dummy_model()
lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Dict = pred_prev_sample
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Union[str, Any] = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
| 81 | 1 |
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = CustomTokenizer
pass
| 81 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
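# One reverse-diffusion step: recover "predicted x_0" from the model output, build the posterior
# mean (formula (7) of the DDPM paper), then add variance noise wherever t > 0.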
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
| 81 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
lowerCAmelCase__ = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
lowerCAmelCase__ = {'''facebook/blenderbot_small-90M''': 5_1_2}
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = set()
lowercase__ : List[Any] = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ : int = char
lowercase__ : List[Any] = set(lowerCamelCase__ )
return pairs
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[str]="__start__" , SCREAMING_SNAKE_CASE : Any="__end__" , SCREAMING_SNAKE_CASE : Dict="__unk__" , SCREAMING_SNAKE_CASE : Union[str, Any]="__null__" , **SCREAMING_SNAKE_CASE : str , ):
super().__init__(unk_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as vocab_handle:
lowercase__ : str = json.load(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE , encoding="utf-8" ) as merges_handle:
lowercase__ : Optional[int] = merges_handle.read().split("\n" )[1:-1]
lowercase__ : str = [tuple(merge.split() ) for merge in merges]
lowercase__ : List[str] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Optional[Any] = {}
@property
def snake_case ( self : Optional[Any] ):
return len(self.encoder )
def snake_case ( self : Optional[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : str ):
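# BPE-encode one token: normalize punctuation and newlines, then greedily apply the lowest-rank
# merge until no ranked pair remains; results are cached.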
if token in self.cache:
return self.cache[token]
lowercase__ : Optional[int] = re.sub("([.,!?()])" , r" \1" , SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = re.sub("(')" , r" \1 " , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = re.sub(r"\s{2,}" , " " , SCREAMING_SNAKE_CASE )
if "\n" in token:
lowercase__ : Union[str, Any] = token.replace("\n" , " __newln__" )
lowercase__ : str = token.split(" " )
lowercase__ : int = []
for token in tokens:
if not len(SCREAMING_SNAKE_CASE ):
continue
lowercase__ : List[Any] = token.lower()
lowercase__ : Optional[Any] = tuple(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
lowercase__ : str = get_pairs(SCREAMING_SNAKE_CASE )
if not pairs:
words.append(SCREAMING_SNAKE_CASE )
continue
while True:
lowercase__ : List[Any] = min(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : self.bpe_ranks.get(SCREAMING_SNAKE_CASE , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ : Optional[Any] = bigram
lowercase__ : str = []
lowercase__ : List[Any] = 0
while i < len(SCREAMING_SNAKE_CASE ):
try:
lowercase__ : Dict = word.index(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
new_word.extend(word[i:j] )
lowercase__ : int = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ : Union[str, Any] = tuple(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = new_word
if len(SCREAMING_SNAKE_CASE ) == 1:
break
else:
lowercase__ : List[Any] = get_pairs(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "@@ ".join(SCREAMING_SNAKE_CASE )
lowercase__ : Any = word[:-4]
lowercase__ : Dict = word
words.append(SCREAMING_SNAKE_CASE )
return " ".join(SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = []
lowercase__ : List[Any] = re.findall(r"\S+\n?" , SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE ).split(" " ) ) )
return split_tokens
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Union[str, Any] = token.lower()
return self.encoder.get(SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int ):
return self.decoder.get(SCREAMING_SNAKE_CASE , self.unk_token )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Optional[Any] = " ".join(SCREAMING_SNAKE_CASE ).replace("@@ " , "" ).strip()
return out_string
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ : int = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : str = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE , ensure_ascii=SCREAMING_SNAKE_CASE ) + "\n" )
lowercase__ : Optional[Any] = 0
with open(SCREAMING_SNAKE_CASE , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
lowercase__ : Dict = token_index
writer.write(" ".join(SCREAMING_SNAKE_CASE ) + "\n" )
index += 1
return vocab_file, merge_file
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Union[str, Any] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
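# Stage 1: CLIPSeg predicts a soft mask for the region described by `text`;
# stage 2: a StableDiffusionInpaintPipeline repaints inside that mask.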
lowercase__ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
| 81 | 1 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : int = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = -1
lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer.decode(greedy_ids[0] )
lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
lowercase__ : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : Optional[Any] = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = -1
lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 81 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
lowercase__ : str = True if "large" in model_name or "huge" in model_name else False
lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : int = [3, 3, 3, 3]
lowercase__ : Tuple = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : Optional[Any] = [4, 4, 4, 4]
lowercase__ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
else:
lowercase__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[Any] = 96
elif "small" in model_name:
lowercase__ : List[str] = 96
elif "base" in model_name:
lowercase__ : str = 128
elif "large" in model_name:
lowercase__ : Any = 192
elif "xlarge" in model_name:
lowercase__ : str = 256
elif "huge" in model_name:
lowercase__ : List[str] = 352
# set label information
lowercase__ : Tuple = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__ : List[Any] = "imagenet-22k-id2label.json"
else:
lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : str = FocalNetConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
return config
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
# fmt: off
lowercase__ : List[Any] = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
lowercase__ : Union[str, Any] = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase__ )
lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
lowercase__ : List[str] = val
lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase__ )
# verify conversion
lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int = BitImageProcessor(
do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
lowercase__ : Any = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
lowercase__ : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 1 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__ : str = tau * frequency / samplerate
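    # w0 = tau * f / fs is the normalized angular frequency (tau == 2 * pi)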
lowercase__ : Dict = sin(lowerCamelCase__ )
lowercase__ : Optional[Any] = cos(lowerCamelCase__ )
lowercase__ : Optional[int] = _sin / (2 * q_factor)
lowercase__ : List[str] = (1 - _cos) / 2
lowercase__ : Union[str, Any] = 1 - _cos
lowercase__ : Dict = 1 + alpha
lowercase__ : Dict = -2 * _cos
lowercase__ : Tuple = 1 - alpha
lowercase__ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
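# Minimal usage sketch (assumes the upstream names `make_lowpass` and the
# per-sample `IIRFilter.process` method, which are not visible in this
# obfuscated listing):
#   filt = make_lowpass(frequency=1_000, samplerate=48_000)
#   filtered = [filt.process(s) for s in samples]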
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__ : Union[str, Any] = tau * frequency / samplerate
lowercase__ : Any = sin(lowerCamelCase__ )
lowercase__ : Tuple = cos(lowerCamelCase__ )
lowercase__ : int = _sin / (2 * q_factor)
lowercase__ : Optional[Any] = (1 + _cos) / 2
lowercase__ : Optional[Any] = -1 - _cos
lowercase__ : str = 1 + alpha
lowercase__ : str = -2 * _cos
lowercase__ : int = 1 - alpha
lowercase__ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__ : Union[str, Any] = tau * frequency / samplerate
lowercase__ : Tuple = sin(lowerCamelCase__ )
lowercase__ : List[Any] = cos(lowerCamelCase__ )
lowercase__ : Tuple = _sin / (2 * q_factor)
lowercase__ : Tuple = _sin / 2
lowercase__ : Dict = 0
lowercase__ : Optional[Any] = -ba
lowercase__ : List[Any] = 1 + alpha
lowercase__ : Optional[int] = -2 * _cos
lowercase__ : List[Any] = 1 - alpha
lowercase__ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__ : int = tau * frequency / samplerate
lowercase__ : List[str] = sin(lowerCamelCase__ )
lowercase__ : List[str] = cos(lowerCamelCase__ )
lowercase__ : Dict = _sin / (2 * q_factor)
lowercase__ : Optional[Any] = 1 - alpha
lowercase__ : str = -2 * _cos
lowercase__ : List[Any] = 1 + alpha
lowercase__ : str = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__ : List[Any] = tau * frequency / samplerate
lowercase__ : List[str] = sin(lowerCamelCase__ )
lowercase__ : Optional[int] = cos(lowerCamelCase__ )
lowercase__ : str = _sin / (2 * q_factor)
lowercase__ : Optional[int] = 10 ** (gain_db / 40)
lowercase__ : Tuple = 1 + alpha * big_a
lowercase__ : List[str] = -2 * _cos
lowercase__ : int = 1 - alpha * big_a
lowercase__ : Optional[int] = 1 + alpha / big_a
lowercase__ : Optional[int] = -2 * _cos
lowercase__ : str = 1 - alpha / big_a
lowercase__ : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__ : str = tau * frequency / samplerate
lowercase__ : Any = sin(lowerCamelCase__ )
lowercase__ : Any = cos(lowerCamelCase__ )
lowercase__ : Dict = _sin / (2 * q_factor)
lowercase__ : Tuple = 10 ** (gain_db / 40)
lowercase__ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase__ : Dict = (big_a + 1) + (big_a - 1) * _cos
lowercase__ : Dict = (big_a - 1) - (big_a + 1) * _cos
lowercase__ : Union[str, Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase__ : Optional[int] = 2 * sqrt(lowerCamelCase__ ) * alpha
lowercase__ : int = big_a * (pmc + aaa)
lowercase__ : List[str] = 2 * big_a * mpc
lowercase__ : Optional[Any] = big_a * (pmc - aaa)
lowercase__ : Tuple = ppmc + aaa
lowercase__ : Any = -2 * pmpc
lowercase__ : Optional[int] = ppmc - aaa
lowercase__ : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__ : Tuple = tau * frequency / samplerate
lowercase__ : Any = sin(lowerCamelCase__ )
lowercase__ : Optional[int] = cos(lowerCamelCase__ )
lowercase__ : Any = _sin / (2 * q_factor)
lowercase__ : Dict = 10 ** (gain_db / 40)
lowercase__ : List[str] = (big_a + 1) - (big_a - 1) * _cos
lowercase__ : Dict = (big_a + 1) + (big_a - 1) * _cos
lowercase__ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase__ : Tuple = (big_a - 1) + (big_a + 1) * _cos
lowercase__ : int = 2 * sqrt(lowerCamelCase__ ) * alpha
lowercase__ : Optional[Any] = big_a * (ppmc + aaa)
lowercase__ : Tuple = -2 * big_a * pmpc
lowercase__ : List[Any] = big_a * (ppmc - aaa)
lowercase__ : Optional[Any] = pmc + aaa
lowercase__ : Tuple = 2 * mpc
lowercase__ : int = pmc - aaa
lowercase__ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
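# The coefficient formulas above follow the standard biquad designs from
# Robert Bristow-Johnson's Audio EQ Cookbook (low-/high-pass, band-pass,
# all-pass, peaking EQ, and low/high shelving filters).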
| 81 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """informer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ):
# time series specific configuration
lowercase__ : Any = prediction_length
lowercase__ : List[str] = context_length or prediction_length
lowercase__ : Tuple = distribution_output
lowercase__ : Union[str, Any] = loss
lowercase__ : Union[str, Any] = input_size
lowercase__ : List[str] = num_time_features
lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase__ : List[str] = scaling
lowercase__ : str = num_dynamic_real_features
lowercase__ : Tuple = num_static_real_features
lowercase__ : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Dict = cardinality
else:
lowercase__ : Dict = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Union[str, Any] = embedding_dimension
else:
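            # common heuristic: embedding size grows with category count, capped at 50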
lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ : Dict = num_parallel_samples
# Transformer architecture configuration
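        # per-step model input: one value for every lag of each input dimension, plus the extra features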
lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ : Optional[Any] = d_model
lowercase__ : int = encoder_attention_heads
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : Tuple = decoder_layers
lowercase__ : Union[str, Any] = dropout
lowercase__ : List[Any] = attention_dropout
lowercase__ : str = activation_dropout
lowercase__ : int = encoder_layerdrop
lowercase__ : Union[str, Any] = decoder_layerdrop
lowercase__ : Tuple = activation_function
lowercase__ : str = init_std
lowercase__ : Tuple = use_cache
# Informer
lowercase__ : Union[str, Any] = attention_type
lowercase__ : Union[str, Any] = sampling_factor
lowercase__ : Tuple = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 81 | 1 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __lowerCamelCase ( ):
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = "mock-s3-bucket"
lowercase__ : Optional[int] = F"""s3://{mock_bucket}"""
lowercase__ : Tuple = extract_path_from_uri(lowerCamelCase__ )
assert dataset_path.startswith("s3://" ) is False
lowercase__ : Tuple = "./local/path"
lowercase__ : List[Any] = extract_path_from_uri(lowerCamelCase__ )
assert dataset_path == new_dataset_path
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = is_remote_filesystem(lowerCamelCase__ )
assert is_remote is True
lowercase__ : int = fsspec.filesystem("file" )
lowercase__ : str = is_remote_filesystem(lowerCamelCase__ )
assert is_remote is False
@pytest.mark.parametrize("compression_fs_class" , lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file}
lowercase__ : Optional[int] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase__ : str = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase__ )
lowercase__ : str = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCamelCase__ )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : Optional[Any] = os.path.basename(lowerCamelCase__ )
lowercase__ : Union[str, Any] = expected_filename[: expected_filename.rindex("." )]
assert fs.glob("*" ) == [expected_filename]
with fs.open(lowerCamelCase__ , "r" , encoding="utf-8" ) as f, open(lowerCamelCase__ , encoding="utf-8" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol" , ["zip", "gzip"] )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[Any] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
lowercase__ : int = compressed_file_paths[protocol]
lowercase__ : List[Any] = "dataset.jsonl"
lowercase__ : str = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
lowercase__ , *lowercase__ : Any = fsspec.get_fs_token_paths(lowerCamelCase__ )
assert fs.isfile(lowerCamelCase__ )
assert not fs.isfile("non_existing_" + member_file_path )
@pytest.mark.integration
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = hf_api.dataset_info(lowerCamelCase__ , token=lowerCamelCase__ )
lowercase__ : int = HfFileSystem(repo_info=lowerCamelCase__ , token=lowerCamelCase__ )
assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"]
assert hffs.isdir("data" )
assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" )
with open(lowerCamelCase__ ) as f:
assert hffs.open("data/text_data.txt" , "r" ).read() == f.read()
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : List[Any] = "bz2"
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCamelCase__ , lowerCamelCase__ , clobber=lowerCamelCase__ )
with pytest.warns(lowerCamelCase__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(lowerCamelCase__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 81 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
else:
lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
lowercase__ : str = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowercase__ : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowercase__ : Tuple = prophet
lowercase__ : Tuple = prophet_old
else:
lowercase__ : Tuple = prophet.prophetnet
lowercase__ : List[str] = prophet_old.model
lowercase__ : int = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : int = mapping[attribute]
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowercase__ : Dict = attribute
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowercase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowercase__ : str = True
break
elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
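                # the fused in_proj matrix stacks query, key and value weights along dim 0,
                # so each projection takes one third of the rows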
lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase__ : str = model[int(lowerCamelCase__ )]
lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
else:
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_attribute == "":
lowercase__ : str = old_model
else:
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 81 | 1 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self : Tuple ):
lowercase__ : str = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small" )
lowercase__ : List[Any] = AutoTokenizer.from_pretrained("google/mt5-small" )
lowercase__ : Dict = tokenizer("Hello there" , return_tensors="np" ).input_ids
lowercase__ : Dict = tokenizer("Hi I am" , return_tensors="np" ).input_ids
lowercase__ : List[Any] = shift_tokens_right(SCREAMING_SNAKE_CASE , model.config.pad_token_id , model.config.decoder_start_token_id )
lowercase__ : str = model(SCREAMING_SNAKE_CASE , decoder_input_ids=SCREAMING_SNAKE_CASE ).logits
lowercase__ : Optional[int] = optax.softmax_cross_entropy(SCREAMING_SNAKE_CASE , onehot(SCREAMING_SNAKE_CASE , logits.shape[-1] ) ).mean()
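        # scale the mean per-token loss back up to a total sequence log-likelihood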
lowercase__ : List[Any] = -(labels.shape[-1] * loss.item())
lowercase__ : int = -84.9_127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 81 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def snake_case ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = "lower newer"
lowercase__ : Optional[Any] = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokens + [tokenizer.unk_token]
lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
# Testing tokenization
lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : Dict = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Any ):
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 1 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase__ : Tuple = 128
elif "12-12" in model_name:
lowercase__ : int = 12
lowercase__ : List[Any] = 12
elif "14-14" in model_name:
lowercase__ : Union[str, Any] = 14
lowercase__ : Any = 14
elif "16-16" in model_name:
lowercase__ : Union[str, Any] = 16
lowercase__ : int = 16
else:
raise ValueError("Model not supported" )
lowercase__ : Tuple = "huggingface/label-files"
if "speech-commands" in model_name:
lowercase__ : Union[str, Any] = 35
lowercase__ : List[Any] = "speech-commands-v2-id2label.json"
else:
lowercase__ : Optional[Any] = 527
lowercase__ : Union[str, Any] = "audioset-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
    lowercase__ : int = {int(k): v for k, v in idalabel.items()}
lowercase__ : int = idalabel
lowercase__ : int = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "module.v" in name:
lowercase__ : str = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
lowercase__ : Any = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
lowercase__ : List[str] = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
lowercase__ : str = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : Dict = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
lowercase__ : Union[str, Any] = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowercase__ : Dict = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : Optional[int] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : Tuple = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : Tuple = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : int = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase__ : List[str] = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
lowercase__ : Optional[Any] = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
lowercase__ : Union[str, Any] = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : Union[str, Any] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowercase__ : Optional[Any] = key.split("." )
lowercase__ : List[Any] = int(key_split[3] )
lowercase__ : Union[str, Any] = config.hidden_size
if "weight" in key:
lowercase__ : Any = val[:dim, :]
lowercase__ : str = val[dim : dim * 2, :]
lowercase__ : List[str] = val[-dim:, :]
else:
lowercase__ : Tuple = val[:dim]
lowercase__ : str = val[dim : dim * 2]
lowercase__ : Dict = val[-dim:]
else:
lowercase__ : str = val
return orig_state_dict
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
lowercase__ : Tuple = get_audio_spectrogram_transformer_config(lowerCamelCase__ )
lowercase__ : List[str] = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
lowercase__ : Optional[int] = model_name_to_url[model_name]
lowercase__ : Tuple = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )
# remove some keys
remove_keys(lowerCamelCase__ )
# rename some keys
lowercase__ : List[str] = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
# load 🤗 model
lowercase__ : int = ASTForAudioClassification(lowerCamelCase__ )
model.eval()
model.load_state_dict(lowerCamelCase__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase__ : Union[str, Any] = -4.2677393 if "speech-commands" not in model_name else -6.845978
lowercase__ : int = 4.5689974 if "speech-commands" not in model_name else 5.5654526
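    # dataset-level mean/std used by the feature extractor to normalize the log-mel filterbank features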
lowercase__ : str = 1_024 if "speech-commands" not in model_name else 128
lowercase__ : Union[str, Any] = ASTFeatureExtractor(mean=lowerCamelCase__ , std=lowerCamelCase__ , max_length=lowerCamelCase__ )
if "speech-commands" in model_name:
lowercase__ : Dict = load_dataset("speech_commands" , "v0.02" , split="validation" )
lowercase__ : Any = dataset[0]["audio"]["array"]
else:
lowercase__ : int = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
lowercase__ , lowercase__ : Dict = torchaudio.load(lowerCamelCase__ )
lowercase__ : List[Any] = waveform.squeeze().numpy()
lowercase__ : List[Any] = feature_extractor(lowerCamelCase__ , sampling_rate=16_000 , return_tensors="pt" )
# forward pass
lowercase__ : Dict = model(**lowerCamelCase__ )
lowercase__ : Any = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase__ : List[str] = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase__ : List[str] = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase__ : int = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase__ : Any = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase__ : List[str] = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase__ : List[Any] = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase__ : Tuple = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase__ : int = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(F"""Saving feature extractor to {pytorch_dump_folder_path}""" )
feature_extractor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F"""MIT/{model_name}""" )
feature_extractor.push_to_hub(F"""MIT/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
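# At type-checking time the concrete classes are imported directly; at runtime a
# lazy module defers the heavy torch imports until an attribute is first accessed.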
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 1 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = R"\w+[.]\d+"
lowercase__ : Any = re.findall(lowerCamelCase__ , lowerCamelCase__ )
for pat in pats:
lowercase__ : Dict = key.replace(lowerCamelCase__ , "_".join(pat.split("." ) ) )
return key
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = pt_tuple_key[:-1] + ("scale",)
if (
any("norm" in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
lowercase__ : str = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
lowercase__ : str = pt_tuple_key[:-1] + ("scale",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
lowercase__ : Any = pt_tuple_key[:-1] + ("embedding",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowercase__ : List[str] = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
lowercase__ : List[str] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowercase__ : str = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight":
lowercase__ : Union[str, Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowercase__ : Union[str, Any] = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowercase__ : Tuple = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=42 ):
"""simple docstring"""
lowercase__ : Any = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
lowercase__ : List[str] = flax_model.init_weights(PRNGKey(lowerCamelCase__ ) )
lowercase__ : Optional[int] = flatten_dict(lowerCamelCase__ )
lowercase__ : Tuple = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowercase__ : Tuple = rename_key(lowerCamelCase__ )
lowercase__ : Any = tuple(renamed_pt_key.split("." ) )
# Correctly rename weight parameters
lowercase__ , lowercase__ : int = rename_key_and_reshape_tensor(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# also add unexpected weight so that warning is thrown
lowercase__ : str = jnp.asarray(lowerCamelCase__ )
return unflatten_dict(lowerCamelCase__ )
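# Minimal usage sketch (assumes the upstream name `convert_pytorch_state_dict_to_flax`,
# which is not visible in this obfuscated listing):
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)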
| 81 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
lowercase__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
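        # ViTMAE sorts this per-patch noise to decide which patches get masked, so seeding makes the mask deterministic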
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
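# name-pairing example: for TFViTMAEModel, "TFViTMAEMainLayer"[: -len("MainLayer")] and
# "TFViTMAEModel"[: -len("Model")] both reduce to "TFViTMAE", so only the matching main layer is kept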
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : str = outputs.last_hidden_state.numpy()
lowercase__ : Optional[Any] = 0
else:
lowercase__ : Optional[Any] = outputs.logits.numpy()
lowercase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy()
lowercase__ : Optional[int] = 0
else:
lowercase__ : str = after_outputs["logits"].numpy()
lowercase__ : Tuple = 0
lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
| 81 | 1 |
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class snake_case__:
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if dst_width <= 0 or dst_height <= 0:
raise ValueError("Destination width/height should be > 0" )
lowercase__ : int = img
lowercase__ : Optional[Any] = img.shape[1]
lowercase__ : Tuple = img.shape[0]
lowercase__ : Tuple = dst_width
lowercase__ : List[Any] = dst_height
lowercase__ : Optional[int] = self.src_w / self.dst_w
lowercase__ : Union[str, Any] = self.src_h / self.dst_h
lowercase__ : List[Any] = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def snake_case ( self : Any ):
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowercase__ : Tuple = self.img[self.get_y(SCREAMING_SNAKE_CASE )][self.get_x(SCREAMING_SNAKE_CASE )]
def snake_case ( self : str , SCREAMING_SNAKE_CASE : int ):
return int(self.ratio_x * x )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : int ):
return int(self.ratio_y * y )
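# e.g. mapping a 400-pixel-wide source onto an 800-pixel-wide destination gives ratio_x = 400 / 800 = 0.5,
# so destination column j samples source column int(0.5 * j) (nearest neighbour by truncation)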
if __name__ == "__main__":
lowerCAmelCase__ , lowerCAmelCase__ = 8_0_0, 6_0_0
lowerCAmelCase__ = imread('''image_data/lena.jpg''', 1)
lowerCAmelCase__ = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
| 81 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """esm"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : Optional[int] = emb_layer_norm_before
lowercase__ : List[str] = token_dropout
lowercase__ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : List[str] = get_default_vocab_list()
else:
lowercase__ : List[Any] = vocab_list
else:
lowercase__ : List[Any] = None
lowercase__ : List[str] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def snake_case ( self : List[str] ):
lowercase__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Optional[int] ):
if self.trunk is None:
lowercase__ : Dict = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE ):
lowercase__ : int = TrunkConfig(**self.trunk )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Any = self.trunk.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 4_8
lowercase_ = 1_0_2_4
lowercase_ = 1_2_8
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 0
lowercase_ = 0
lowercase_ = False
lowercase_ = 4
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Dict ):
if self.structure_module is None:
lowercase__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
lowercase__ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
lowercase__ : List[Any] = self.pairwise_state_dim // self.pairwise_head_width
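# with the defaults above (sequence_state_dim=1024, sequence_head_width=32, pairwise_state_dim=128,
# pairwise_head_width=32) this yields 1024 // 32 = 32 sequence heads and 128 // 32 = 4 pairwise heads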
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def snake_case ( self : Optional[Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 3_8_4
lowercase_ = 1_2_8
lowercase_ = 1_6
lowercase_ = 1_2_8
lowercase_ = 1_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 1_0
lowercase_ = 1e-8
lowercase_ = 1e5
def snake_case ( self : Dict ):
return asdict(self )
def __lowerCamelCase ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 81 | 1 |
import os
from collections.abc import Iterator
def __lowerCamelCase ( lowerCamelCase__ = "." ):
"""simple docstring"""
for dir_path, dir_names, filenames in os.walk(lowerCamelCase__ ):
lowercase__ : Union[str, Any] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(lowerCamelCase__ )[1] in (".py", ".ipynb"):
yield os.path.join(lowerCamelCase__ , lowerCamelCase__ ).lstrip("./" )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return F"""{i * " "}*""" if i else "\n##"
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[Any] = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(lowerCamelCase__ ) or old_parts[i] != new_part) and new_part:
print(F"""{md_prefix(lowerCamelCase__ )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def __lowerCamelCase ( lowerCamelCase__ = "." ):
"""simple docstring"""
lowercase__ : int = ""
for filepath in sorted(good_file_paths(lowerCamelCase__ ) ):
lowercase__ , lowercase__ : Optional[Any] = os.path.split(lowerCamelCase__ )
if filepath != old_path:
lowercase__ : List[Any] = print_path(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : str = (filepath.count(os.sep ) + 1) if filepath else 0
lowercase__ : int = F"""{filepath}/{filename}""".replace(" " , "%20" )
lowercase__ : str = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(F"""{md_prefix(lowerCamelCase__ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('''.''')
| 81 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """deformable_detr"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : List[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
def snake_case ( self : str ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
| 81 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
else:
lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
lowercase__ : str = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowercase__ : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowercase__ : Tuple = prophet
lowercase__ : Tuple = prophet_old
else:
lowercase__ : Tuple = prophet.prophetnet
lowercase__ : List[str] = prophet_old.model
lowercase__ : int = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : int = mapping[attribute]
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowercase__ : Dict = attribute
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowercase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowercase__ : str = True
break
elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
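# the old fused projection stacks q, k and v along dim 0: rows [0, embed_dim) are the query,
# [embed_dim, 2 * embed_dim) the key and [2 * embed_dim, 3 * embed_dim) the value, hence the slicing below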
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase__ : str = model[int(lowerCamelCase__ )]
lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
else:
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_attribute == "":
lowercase__ : str = old_model
else:
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 81 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__ : List[Any] = (old_width // size + 1) * size - old_width
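# e.g. old_height=17 with size=8 gives pad_height = (17 // 8 + 1) * 8 - 17 = 7; note that an exact
# multiple still gains a full extra block: old_height=16 gives pad_height = 8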
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 81 | 1 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case__(unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion" )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A painting of a squirrel eating a burger "
lowercase__ : Dict = torch.manual_seed(0 )
lowercase__ : Tuple = pipe(
prompt=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = generator.manual_seed(0 )
lowercase__ : Union[str, Any] = pipe(
prompt=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def snake_case ( self : List[str] ):
lowercase__ : Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(
"shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = "A painting of a squirrel eating a burger "
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : str = pipe(
prompt=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
lowercase__ : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : Union[str, Any] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 81 |
import argparse
import json
from tqdm import tqdm
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , )
lowercase__ : Dict = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
lowercase__ : List[str] = json.load(lowerCamelCase__ )
for dpr_record in tqdm(lowerCamelCase__ ):
lowercase__ : Any = dpr_record["question"]
lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(lowerCamelCase__ ) + "\n" )
if __name__ == "__main__":
main()
| 81 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_vision_text_dual_encoder''': ['''VisionTextDualEncoderConfig'''],
'''processing_vision_text_dual_encoder''': ['''VisionTextDualEncoderProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''VisionTextDualEncoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''FlaxVisionTextDualEncoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''TFVisionTextDualEncoderModel''']
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 81 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
lowercase__ : Optional[int] = parser.parse_args()
return args
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def fn(lowerCamelCase__ ):
return tokenizer(examples["text"] )
return fn
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = []
for i in range(len(tokenized_data["input_ids"] ) ):
lowercase__ : str = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ )
lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ )
lowercase__ : str = example.SerializeToString()
records.append(lowerCamelCase__ )
return records
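# decoding sketch (assumed consumer-side code, not part of this script): each record could be parsed
# back with tf.io.parse_single_example(record, {"input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
# "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64)}), assuming fixed-length examples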
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit )
lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
lowercase__ : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ : str = tokenize_function(lowerCamelCase__ )
lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowerCamelCase__ ):
# Concatenate all texts.
lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ : List[str] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ : Optional[int] = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
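# e.g. total_length=1050 tokens with max_length=512 keeps (1050 // 512) * 512 = 1024 tokens,
# i.e. two 512-token chunks, and the trailing 26 tokens are dropped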
lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 )
lowercase__ : str = 0
lowercase__ : str = 0
for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ):
lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size]
lowercase__ : str = len(dataset_snapshot["input_ids"] )
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ )
with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file:
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Optional[int] = serialized_examples[i]
out_file.write(lowerCamelCase__ )
print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
| 81 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 81 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
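# e.g. with the tester default image_size=32 the map is 1x1 spatially, since 32 // 32 = 1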
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = ConvNextVaModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
if model_class.__name__ in [
*get_values(SCREAMING_SNAKE_CASE ),
*get_values(SCREAMING_SNAKE_CASE ),
]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
if (
model_class.__name__
in [*get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
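            # + 1 because the embeddings (stem) output is returned alongside one tensor per stage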
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
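# e.g. `foo: int | None = None` instead of `foo: Optional[int] = None`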
lowerCAmelCase__ = sys.version_info >= (3, 1_0)
def __lowerCamelCase ( lowerCamelCase__=None , lowerCamelCase__=None ):
"""simple docstring"""
return field(default_factory=lambda: default , metadata=lowerCamelCase__ )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 4_2
lowercase_ = field(default="""toto""" , metadata={"""help""": """help message"""} )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = False
lowercase_ = True
lowercase_ = None
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """titi"""
lowercase_ = """toto"""
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """titi"""
lowercase_ = """toto"""
lowercase_ = 4_2
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = "toto"
def snake_case ( self : int ):
lowercase__ : Optional[Any] = BasicEnum(self.foo )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = "toto"
def snake_case ( self : Tuple ):
lowercase__ : List[str] = MixedTypeEnum(self.foo )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = field(default=_UpperCamelCase , metadata={"""help""": """help message"""} )
lowercase_ = None
lowercase_ = list_field(default=[] )
lowercase_ = list_field(default=[] )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = list_field(default=[] )
lowercase_ = list_field(default=[1, 2, 3] )
lowercase_ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
lowercase_ = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = field()
lowercase_ = field()
lowercase_ = field()
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = BasicEnum(self.required_enum )
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
lowercase_ = field()
lowercase_ = None
lowercase_ = field(default="""toto""" , metadata={"""help""": """help message"""} )
lowercase_ = list_field(default=["""Hallo""", """Bonjour""", """Hello"""] )
if is_python_no_less_than_3_10:
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = False
lowercase_ = True
lowercase_ = None
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = field(default=_UpperCamelCase , metadata={"""help""": """help message"""} )
lowercase_ = None
lowercase_ = list_field(default=[] )
lowercase_ = list_field(default=[] )
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : argparse.ArgumentParser , SCREAMING_SNAKE_CASE : argparse.ArgumentParser ):
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
            lowercase__ : Tuple = {k: v for k, v in vars(x ).items() if k != "container"}
            lowercase__ : Optional[Any] = {k: v for k, v in vars(y ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , SCREAMING_SNAKE_CASE ) and yy.get("choices" , SCREAMING_SNAKE_CASE ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](SCREAMING_SNAKE_CASE ) , yy["type"](SCREAMING_SNAKE_CASE ) )
del xx["type"], yy["type"]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Any = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = argparse.ArgumentParser()
expected.add_argument("--foo" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE )
expected.add_argument("--bar" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE )
expected.add_argument("--baz" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE )
expected.add_argument("--flag" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , const=SCREAMING_SNAKE_CASE , nargs="?" )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowercase__) , ) : Optional[Any] = parser.parse_args_into_dataclasses(SCREAMING_SNAKE_CASE , look_for_args_file=SCREAMING_SNAKE_CASE )
self.assertFalse(example.flag )
def snake_case ( self : Tuple ):
lowercase__ : List[Any] = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=SCREAMING_SNAKE_CASE )
expected.add_argument("--baz" , default="toto" , type=SCREAMING_SNAKE_CASE , help="help message" )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : List[str] = argparse.ArgumentParser()
expected.add_argument("--foo" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , const=SCREAMING_SNAKE_CASE , nargs="?" )
expected.add_argument("--baz" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , const=SCREAMING_SNAKE_CASE , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=SCREAMING_SNAKE_CASE , dest="baz" )
expected.add_argument("--opt" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
lowercase__ : int = HfArgumentParser(SCREAMING_SNAKE_CASE )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : str = parser.parse_args([] )
self.assertEqual(SCREAMING_SNAKE_CASE , Namespace(foo=SCREAMING_SNAKE_CASE , baz=SCREAMING_SNAKE_CASE , opt=SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(SCREAMING_SNAKE_CASE , Namespace(foo=SCREAMING_SNAKE_CASE , baz=SCREAMING_SNAKE_CASE , opt=SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(SCREAMING_SNAKE_CASE , Namespace(foo=SCREAMING_SNAKE_CASE , baz=SCREAMING_SNAKE_CASE , opt=SCREAMING_SNAKE_CASE ) )
lowercase__ : int = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(SCREAMING_SNAKE_CASE , Namespace(foo=SCREAMING_SNAKE_CASE , baz=SCREAMING_SNAKE_CASE , opt=SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[int] = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(SCREAMING_SNAKE_CASE , Namespace(foo=SCREAMING_SNAKE_CASE , baz=SCREAMING_SNAKE_CASE , opt=SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Optional[Any] ):
lowercase__ : Any = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowercase__ : List[Any] = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowercase__ : List[Any] = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowercase__ : Optional[int] = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowercase__ : Optional[Any] = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowercase__ : Dict = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def snake_case ( self : int ):
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = "toto"
lowercase__ : Dict = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowercase__ : Optional[int] = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowercase__ : Union[str, Any] = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def snake_case ( self : Optional[Any] ):
lowercase__ : Union[str, Any] = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=SCREAMING_SNAKE_CASE )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=SCREAMING_SNAKE_CASE )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=SCREAMING_SNAKE_CASE )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=SCREAMING_SNAKE_CASE )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = parser.parse_args([] )
self.assertEqual(
SCREAMING_SNAKE_CASE , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowercase__ : Any = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(SCREAMING_SNAKE_CASE , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def snake_case ( self : List[str] ):
lowercase__ : List[str] = argparse.ArgumentParser()
expected.add_argument("--foo" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE )
expected.add_argument("--bar" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE , help="help message" )
expected.add_argument("--baz" , default=SCREAMING_SNAKE_CASE , type=SCREAMING_SNAKE_CASE )
expected.add_argument("--ces" , nargs="+" , default=[] , type=SCREAMING_SNAKE_CASE )
expected.add_argument("--des" , nargs="+" , default=[] , type=SCREAMING_SNAKE_CASE )
lowercase__ : str = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(SCREAMING_SNAKE_CASE )
for dataclass_type in dataclass_types:
lowercase__ : Dict = HfArgumentParser(SCREAMING_SNAKE_CASE )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = parser.parse_args([] )
self.assertEqual(SCREAMING_SNAKE_CASE , Namespace(foo=SCREAMING_SNAKE_CASE , bar=SCREAMING_SNAKE_CASE , baz=SCREAMING_SNAKE_CASE , ces=[] , des=[] ) )
lowercase__ : Optional[Any] = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(SCREAMING_SNAKE_CASE , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def snake_case ( self : Any ):
lowercase__ : str = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE )
expected.add_argument("--required_str" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=SCREAMING_SNAKE_CASE , )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : str = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : int = argparse.ArgumentParser()
expected.add_argument("--foo" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=SCREAMING_SNAKE_CASE , )
expected.add_argument("--opt" , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE )
expected.add_argument("--baz" , default="toto" , type=SCREAMING_SNAKE_CASE , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=SCREAMING_SNAKE_CASE )
self.argparsersEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : Optional[Any] = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowercase__ : Optional[int] = parser.parse_dict(SCREAMING_SNAKE_CASE )[0]
lowercase__ : Optional[int] = BasicExample(**SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : str = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(SCREAMING_SNAKE_CASE , parser.parse_dict , SCREAMING_SNAKE_CASE , allow_extra_keys=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : Dict = os.path.join(SCREAMING_SNAKE_CASE , "temp_json" )
os.mkdir(SCREAMING_SNAKE_CASE )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowercase__ : str = BasicExample(**SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : str = HfArgumentParser(SCREAMING_SNAKE_CASE )
lowercase__ : str = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "temp_yaml" )
os.mkdir(SCREAMING_SNAKE_CASE )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowercase__ : Union[str, Any] = BasicExample(**SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : Union[str, Any] = HfArgumentParser(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
| 81 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
lowercase__ : str = bertabert.config.encoder.vocab_size
lowercase__ : List[str] = tokenizer.sep_token_id
lowercase__ : Optional[Any] = tokenizer.cls_token_id
lowercase__ : int = 128
lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowercase__ : Tuple = train_dataset.select(range(32 ) )
lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
lowercase__ : int = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
lowercase__ : Tuple = inputs.input_ids
lowercase__ : Optional[int] = inputs.attention_mask
lowercase__ : int = outputs.input_ids
lowercase__ : Dict = outputs.input_ids.copy()
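            # replace padding tokens in the labels by -100, the index PyTorch's cross-entropy loss ignores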
lowercase__ : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
lowercase__ : List[Any] = outputs.attention_mask
            assert all(len(x ) == 512 for x in inputs.input_ids )
            assert all(len(x ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = pred.label_ids
lowercase__ : Dict = pred.predictions
# all unnecessary tokens are removed
lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
            lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
lowercase__ : List[str] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowercase__ : Any = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
lowercase__ : int = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ : str = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
| 81 | 1 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="attention" ):
"""simple docstring"""
lowercase__ : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
lowercase__ : List[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
lowercase__ : Any = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
lowercase__ : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
lowercase__ : Optional[int] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
lowercase__ : str = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
lowercase__ : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
lowercase__ : Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
if split_mlp_wi:
lowercase__ : List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
lowercase__ : Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
lowercase__ : int = (wi_a, wi_a)
else:
lowercase__ : List[str] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
lowercase__ : Tuple = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __lowerCamelCase ( lowerCamelCase__ , *, lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
lowercase__ : Any = traverse_util.flatten_dict(variables["target"] )
lowercase__ : Optional[int] = {"/".join(lowerCamelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowercase__ : Tuple = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , lowerCamelCase__ )
lowercase__ : List[Any] = collections.OrderedDict()
# Shared embeddings.
lowercase__ : Optional[Any] = old["token_embedder/embedding"]
# Encoder.
for i in range(lowerCamelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase__ : Dict = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "pre_attention_layer_norm" )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "attention" )
lowercase__ : List[Any] = layer_norm
lowercase__ : Optional[int] = k.T
lowercase__ : Union[str, Any] = o.T
lowercase__ : Any = q.T
lowercase__ : Dict = v.T
# Block i, layer 1 (MLP).
lowercase__ : Optional[int] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , "pre_mlp_layer_norm" )
lowercase__ , lowercase__ : Optional[int] = tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , "encoder" , lowerCamelCase__ )
lowercase__ : Any = layer_norm
if split_mlp_wi:
lowercase__ : Tuple = wi[0].T
lowercase__ : Optional[int] = wi[1].T
else:
lowercase__ : Optional[int] = wi.T
lowercase__ : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase__ : Dict = tax_relpos_bias_lookup(
lowerCamelCase__ , lowerCamelCase__ , "encoder" ).T
lowercase__ : Tuple = old["encoder/encoder_norm/scale"]
if not scalable_attention:
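        # non-scalable models share a single relative position bias, taken from layer 0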
lowercase__ : Tuple = tax_relpos_bias_lookup(
lowerCamelCase__ , 0 , "encoder" ).T
lowercase__ : Optional[Any] = tax_relpos_bias_lookup(
lowerCamelCase__ , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCamelCase__ ):
# Block i, layer 0 (Self Attention).
lowercase__ : int = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_self_attention_layer_norm" )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "self_attention" )
lowercase__ : Dict = layer_norm
lowercase__ : List[str] = k.T
lowercase__ : Dict = o.T
lowercase__ : Optional[int] = q.T
lowercase__ : Dict = v.T
# Block i, layer 1 (Cross Attention).
lowercase__ : Union[str, Any] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_cross_attention_layer_norm" )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = tax_attention_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "encoder_decoder_attention" )
lowercase__ : Tuple = layer_norm
lowercase__ : Union[str, Any] = k.T
lowercase__ : Optional[Any] = o.T
lowercase__ : Optional[Any] = q.T
lowercase__ : str = v.T
# Block i, layer 2 (MLP).
lowercase__ : List[Any] = tax_layer_norm_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , "pre_mlp_layer_norm" )
lowercase__ , lowercase__ : List[Any] = tax_mlp_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" , lowerCamelCase__ )
lowercase__ : str = layer_norm
if split_mlp_wi:
lowercase__ : Any = wi[0].T
lowercase__ : List[Any] = wi[1].T
else:
lowercase__ : Optional[Any] = wi.T
lowercase__ : int = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
lowercase__ : int = tax_relpos_bias_lookup(lowerCamelCase__ , lowerCamelCase__ , "decoder" ).T
lowercase__ : Any = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowercase__ : int = old["decoder/logits_dense/kernel"].T
return new
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowercase__ : Tuple = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowercase__ : List[Any] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
lowercase__ : List[str] = state_dict["shared.weight"]
return state_dict
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = checkpoints.load_tax_checkpoint(lowerCamelCase__ )
lowercase__ : int = convert_tax_to_pytorch(
lowerCamelCase__ , num_layers=config.num_layers , is_encoder_only=lowerCamelCase__ , scalable_attention=lowerCamelCase__ )
lowercase__ : Tuple = make_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False , lowerCamelCase__ = False , ):
"""simple docstring"""
lowercase__ : Dict = MTaConfig.from_json_file(lowerCamelCase__ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowercase__ : Optional[int] = UMTaEncoderModel(lowerCamelCase__ )
else:
lowercase__ : Optional[int] = UMTaForConditionalGeneration(lowerCamelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCamelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCamelCase__ )
print("Done" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
lowerCAmelCase__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 81 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase__ : Tuple = 192
lowercase__ : List[Any] = 768
lowercase__ : Tuple = 12
lowercase__ : List[str] = 3
lowercase__ : List[Any] = [800, 1_333]
lowercase__ : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowercase__ : str = 330
lowercase__ : List[Any] = 14
lowercase__ : Tuple = 6
lowercase__ : Optional[int] = 1_320
elif "yolos_s" in yolos_name:
lowercase__ : Dict = 384
lowercase__ : str = 1_536
lowercase__ : List[Any] = 12
lowercase__ : List[Any] = 6
elif "yolos_b" in yolos_name:
lowercase__ : int = [800, 1_344]
lowercase__ : Tuple = 91
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : Optional[int] = "coco-detection-id2label.json"
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
    lowercase__ : Optional[int] = {int(k ): v for k, v in idalabel.items()}
lowercase__ : List[Any] = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowercase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : str = in_proj_weight[-config.hidden_size :, :]
lowercase__ : Tuple = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "backbone" in name:
lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowercase__ : int = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowercase__ : Dict = key.split("." )
lowercase__ : List[Any] = int(key_split[2] )
lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
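            # dim is the full hidden size, so the fused qkv weight splits into three equal blocks below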
if "weight" in key:
lowercase__ : str = val[:dim, :]
lowercase__ : int = val[
dim : dim * 2, :
]
lowercase__ : str = val[-dim:, :]
else:
lowercase__ : Tuple = val[:dim]
lowercase__ : Any = val[dim : dim * 2]
lowercase__ : Optional[Any] = val[-dim:]
else:
lowercase__ : Optional[Any] = val
return orig_state_dict
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512
lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" )
lowercase__ : int = model(**lowerCamelCase__ )
lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes
lowercase__ , lowercase__ : int = None, None
if yolos_name == "yolos_ti":
lowercase__ : Optional[int] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
lowercase__ : Dict = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
lowercase__ : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
lowercase__ : List[str] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
lowercase__ : Dict = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
lowercase__ : Tuple = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
lowercase__ : Optional[Any] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
lowercase__ : int = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
lowercase__ : List[str] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
lowercase__ : List[str] = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowercase__ : Tuple = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowercase__ : Optional[int] = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 1 |
from math import pi, sqrt
def gamma(num):
    """simple docstring"""
    if num <= 0:
        raise ValueError("math domain error" )
    if num > 171.5:
        raise OverflowError("math range error" )
    elif num - int(num ) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer" )
    elif num == 0.5:
        return sqrt(pi )
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def test_gamma():
    """simple docstring"""
    assert gamma(0.5 ) == sqrt(pi )
    assert gamma(1 ) == 1.0
    assert gamma(2 ) == 1.0
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    num = 1.0
    while num:
        num = float(input('''Gamma of: '''))
print(f'''gamma({num}) = {gamma(num)}''')
print('''\nEnter 0 to exit...''')
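# Illustrative check of the recursion above: gamma(3.5) = 2.5 * 1.5 * 0.5 * sqrt(pi) ≈ 3.3234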
| 81 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
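# Lazy-import layout: the torch-backed classes below are only imported when an
# attribute is first accessed through the _LazyModule installed at the bottom.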
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 1 |
lowerCAmelCase__ = {
"joule": 1.0,
"kilojoule": 1_0_0_0,
"megajoule": 1_0_0_0_0_0_0,
"gigajoule": 1_0_0_0_0_0_0_0_0_0,
"wattsecond": 1.0,
"watthour": 3_6_0_0,
"kilowatthour": 3_6_0_0_0_0_0,
"newtonmeter": 1.0,
"calorie_nutr": 4_1_8_6.8,
"kilocalorie_nutr": 4_1_8_6_8_0_0.0_0,
"electronvolt": 1.602176634e-19,
"britishthermalunit_it": 1_0_5_5.0_5_5_8_5,
"footpound": 1.355_818,
}
def energy_conversion(value, from_type, to_type):
    """simple docstring"""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg : str = (
            F"""Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"""
            F"""Valid values are: {", ".join(ENERGY_CONVERSION )}"""
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
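# Illustrative conversions using the table above:
#   energy_conversion(1, "kilowatthour", "joule") -> 3_600_000.0
#   energy_conversion(1, "joule", "kilowatthour") -> 1 / 3_600_000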
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
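        # build one random channels-first uint8 array and convert it to a channels-last PIL image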
lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowercase__ : str = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 81 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = (DDIMParallelScheduler,)
lowercase_ = (("""eta""", 0.0), ("""num_inference_steps""", 5_0))
def snake_case ( self : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Union[str, Any] = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Dict = self.scheduler_classes[0]
lowercase__ : Optional[Any] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ : str = 10, 0.0
lowercase__ : Optional[Any] = self.dummy_model()
lowercase__ : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
for t in scheduler.timesteps:
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
return sample
def snake_case ( self : List[str] ):
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.scheduler_classes[0]
lowercase__ : List[str] = self.get_scheduler_config(steps_offset=1 )
lowercase__ : Optional[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def snake_case ( self : Dict ):
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE , beta_end=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE , prediction_type=SCREAMING_SNAKE_CASE , sample_max_value=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : int = scheduler_class(**SCREAMING_SNAKE_CASE )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def snake_case ( self : Optional[Any] ):
lowercase__ : str = self.scheduler_classes[0]
lowercase__ : Union[str, Any] = self.get_scheduler_config()
lowercase__ : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ : Dict = 10, 0.0
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = self.dummy_model()
lowercase__ : Dict = self.dummy_sample_deter
lowercase__ : str = self.dummy_sample_deter + 0.1
lowercase__ : Tuple = self.dummy_sample_deter - 0.1
lowercase__ : Optional[Any] = samplea.shape[0]
lowercase__ : List[str] = torch.stack([samplea, samplea, samplea] , dim=0 )
lowercase__ : Any = torch.arange(SCREAMING_SNAKE_CASE )[0:3, None].repeat(1 , SCREAMING_SNAKE_CASE )
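# assemble a (3, batch) grid pairing each stacked sample group with one of the
# first three timesteps, so the flattened call below denoises three different
# noise levels in a single batched step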
lowercase__ : Union[str, Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowercase__ : List[Any] = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , SCREAMING_SNAKE_CASE )
lowercase__ : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1E-2
assert abs(result_mean.item() - 0.4_982 ) < 1E-3
def snake_case ( self : int ):
lowercase__ : Optional[Any] = self.full_loop()
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 172.0_067 ) < 1E-2
assert abs(result_mean.item() - 0.223_967 ) < 1E-3
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.full_loop(prediction_type="v_prediction" )
lowercase__ : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[int] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 52.5_302 ) < 1E-2
assert abs(result_mean.item() - 0.0_684 ) < 1E-3
def snake_case ( self : Optional[int] ):
# We specify a different beta_start so that the first alpha is 0.99
lowercase__ : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE , beta_start=0.01 )
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 149.8_295 ) < 1E-2
assert abs(result_mean.item() - 0.1_951 ) < 1E-3
def snake_case ( self : List[str] ):
# We specify a different beta_start so that the first alpha is 0.99
lowercase__ : str = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE , beta_start=0.01 )
lowercase__ : Optional[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 149.0_784 ) < 1E-2
assert abs(result_mean.item() - 0.1_941 ) < 1E-3
| 81 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : int = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = -1
lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer.decode(greedy_ids[0] )
lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
lowercase__ : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : Optional[Any] = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = -1
lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 81 | 1 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
lowerCAmelCase__ = pd.read_csv(
'''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
'''position_salaries.csv'''
)
lowerCAmelCase__ = dataset.iloc[:, 1:2].values
lowerCAmelCase__ = dataset.iloc[:, 2].values
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = train_test_split(X, y, test_size=0.2, random_state=0)
lowerCAmelCase__ = PolynomialFeatures(degree=4)
lowerCAmelCase__ = poly_reg.fit_transform(X)
lowerCAmelCase__ = LinearRegression()
pol_reg.fit(X_poly, y)
def __lowerCamelCase ( ):
"""simple docstring"""
plt.scatter(lowerCamelCase__ , lowerCamelCase__ , color="red" )
plt.plot(lowerCamelCase__ , pol_reg.predict(poly_reg.fit_transform(lowerCamelCase__ ) ) , color="blue" )
plt.title("Truth or Bluff (Linear Regression)" )
plt.xlabel("Position level" )
plt.ylabel("Salary" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
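# (note: PolynomialFeatures keeps no state beyond the feature count, so
# fit_transform on a fresh point gives the same result here; plain
# poly_reg.transform([[5.5]]) would be the more idiomatic call)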
| 81 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
super().__init__()
lowercase__ : str = layers_per_block
lowercase__ : int = torch.nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = nn.ModuleList([] )
# down
lowercase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = output_channel
lowercase__ : Dict = block_out_channels[i]
lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Union[str, Any] = get_down_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
lowercase__ : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# out
lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : Tuple = False
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = x
lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
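# torch.utils.checkpoint discards intermediate activations on the forward pass
# and recomputes them during backward; the closure above adapts each block to
# the plain-callable signature that torch.utils.checkpoint.checkpoint expects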
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# middle
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
# middle
lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
# post-process
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
super().__init__()
lowercase__ : List[str] = layers_per_block
lowercase__ : int = nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Optional[Any] = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# up
lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = output_channel
lowercase__ : List[Any] = reversed_block_out_channels[i]
lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Dict = get_up_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : List[Any] = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
lowercase__ : Tuple = z
lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
super().__init__()
lowercase__ : List[Any] = n_e
lowercase__ : List[str] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : List[str] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Tuple = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : Union[str, Any] = sane_index_shape
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : List[Any] = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : int = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
return back.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
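# torch.cdist already materializes the full matrix of pairwise Euclidean
# distances, so the expansion in the comment above never has to be written out;
# argmin over dim=1 then picks the nearest codebook entry for each latent vector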
lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
lowercase__ : Dict = None
lowercase__ : int = None
# compute loss for embedding
if not self.legacy:
lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : Union[str, Any] = z + (z_q - z).detach()
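# straight-through estimator: the forward value equals z_q, but the detach()
# makes the gradient of this expression w.r.t. z exactly 1, so encoder
# gradients flow past the non-differentiable argmin as if quantization were
# the identity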
# reshape back to match original input shape
lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
if shape is not None:
lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : str = self.mean + self.std * sample
return x
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
| 81 | 1 |
from random import shuffle
import tensorflow as tf
from numpy import array
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Any = int(lowerCamelCase__ )
assert noofclusters < len(lowerCamelCase__ )
# Find out the dimensionality
lowercase__ : Any = len(vectors[0] )
# Will help select random centroids from among the available vectors
lowercase__ : List[str] = list(range(len(lowerCamelCase__ ) ) )
shuffle(lowerCamelCase__ )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
lowercase__ : List[str] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
lowercase__ : List[Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
lowercase__ : Tuple = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(lowerCamelCase__ )
]
##These nodes will assign the centroid Variables the appropriate
##values
lowercase__ : Tuple = tf.placeholder("float64" , [dim] )
lowercase__ : List[str] = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
lowercase__ : str = [tf.Variable(0 ) for i in range(len(lowerCamelCase__ ) )]
##These nodes will assign an assignment Variable the appropriate
##value
lowercase__ : Dict = tf.placeholder("int32" )
lowercase__ : List[Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowerCamelCase__ , lowerCamelCase__ ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
lowercase__ : Tuple = tf.placeholder("float" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
lowercase__ : Optional[Any] = tf.reduce_mean(lowerCamelCase__ , 0 )
##Node for computing Euclidean distances
# Placeholders for input
lowercase__ : Optional[int] = tf.placeholder("float" , [dim] )
lowercase__ : int = tf.placeholder("float" , [dim] )
lowercase__ : Any = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowerCamelCase__ , lowerCamelCase__ ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
lowercase__ : Optional[int] = tf.placeholder("float" , [noofclusters] )
lowercase__ : List[Any] = tf.argmin(lowerCamelCase__ , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
lowercase__ : List[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowerCamelCase__ )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
lowercase__ : Optional[int] = 100
for _ in range(lowerCamelCase__ ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowerCamelCase__ ) ):
lowercase__ : Union[str, Any] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
# 'centroid_distances', since that is the input to the
# cluster assignment node.
lowercase__ : int = [
sess.run(lowerCamelCase__ , feed_dict={va: vect, va: sess.run(lowerCamelCase__ )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
lowercase__ : Union[str, Any] = sess.run(
lowerCamelCase__ , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowerCamelCase__ ):
# Collect all the vectors assigned to this cluster
lowercase__ : Optional[int] = [
vectors[i]
for i in range(len(lowerCamelCase__ ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
lowercase__ : Optional[int] = sess.run(
lowerCamelCase__ , feed_dict={mean_input: array(lowerCamelCase__ )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
lowercase__ : str = sess.run(lowerCamelCase__ )
lowercase__ : str = sess.run(lowerCamelCase__ )
return centroids, assignments
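# Usage sketch (illustrative only; the public name TFKMeansCluster is assumed
# from the original source, and the TF1.x session API used above must be
# available):
# centroids, assignments = TFKMeansCluster(
#     [array([1.0, 1.0]), array([1.2, 0.8]), array([8.0, 8.0]), array([7.9, 8.2])],
#     2,
# )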
| 81 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 81 | 1 |
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
while second != 0:
lowercase__ : Dict = first & second
first ^= second
lowercase__ : Any = c << 1
return first
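# worked trace for add(5, 9): 5 = 0b0101, 9 = 0b1001
#   pass 1: carry = 0b0001, xor-sum = 0b1100, shifted carry = 0b0010
#   pass 2: carry = 0b0000, xor-sum = 0b1110 -> loop exits, returns 14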
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = int(input('''Enter the first number: ''').strip())
lowerCAmelCase__ = int(input('''Enter the second number: ''').strip())
print(f'''{add(first, second) = }''')
| 81 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = (CMStochasticIterativeScheduler,)
lowercase_ = 1_0
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Optional[int] ):
lowercase__ : Tuple = 10
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Any = scheduler.timesteps[0]
lowercase__ : Optional[int] = scheduler.timesteps[1]
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = self.dummy_model()
lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Dict = pred_prev_sample
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Union[str, Any] = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
| 81 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def __lowerCamelCase ( *lowerCamelCase__ ):
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : str = list(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Optional[Any] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
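# Usage sketch (assuming this is accelerate's `release_memory`): rebinding the
# returned placeholders drops the last references to the old objects so the
# backing accelerator memory can actually be reclaimed:
# model, optimizer = release_memory(model, optimizer)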
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def __lowerCamelCase ( lowerCamelCase__ = None , lowerCamelCase__ = 128 ):
"""simple docstring"""
if function is None:
return functools.partial(lowerCamelCase__ , starting_batch_size=lowerCamelCase__ )
lowercase__ : Union[str, Any] = starting_batch_size
def decorator(*lowerCamelCase__ , **lowerCamelCase__ ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
lowercase__ : Optional[Any] = list(inspect.signature(lowerCamelCase__ ).parameters.keys() )
# Guard against user error
if len(lowerCamelCase__ ) < (len(lowerCamelCase__ ) + 1):
lowercase__ : Optional[int] = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
except Exception as e:
if should_reduce_batch_size(lowerCamelCase__ ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator
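# Usage sketch (assuming the public name is accelerate's
# `find_executable_batch_size`; `training_loop` is hypothetical):
# @find_executable_batch_size(starting_batch_size=128)
# def training_loop(batch_size):
#     ...  # build dataloaders etc. from `batch_size`
# training_loop()  # called without the batch argument; each OOM retries with batch_size // 2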
| 81 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance β̃_t (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
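# rearranged from the forward process x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,
# i.e. x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps_theta) / sqrt(alpha_bar_t)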
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
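# formula (7): mu_t = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#              + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t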
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
| 81 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
class snake_case__(Generic[T] ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : list[T] , SCREAMING_SNAKE_CASE : Callable[[T, T], T] ):
lowercase__ : Any | T = None
lowercase__ : int = len(SCREAMING_SNAKE_CASE )
lowercase__ : list[T] = [any_type for _ in range(self.N )] + arr
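# flat 1-indexed layout: leaves occupy st[N .. 2N - 1], and internal node p
# aggregates its two children st[2p] and st[2p + 1]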
lowercase__ : List[Any] = fnc
self.build()
def snake_case ( self : Union[str, Any] ):
for p in range(self.N - 1 , 0 , -1 ):
lowercase__ : Union[str, Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : T ):
p += self.N
lowercase__ : Union[str, Any] = v
while p > 1:
lowercase__ : Tuple = p // 2
lowercase__ : str = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ): # noqa: E741
lowercase__ , lowercase__ : List[str] = l + self.N, r + self.N
lowercase__ : T | None = None
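# classic bottom-up range query: fold in st[l] when l is a right child,
# st[r] when r is a left child, then lift both endpoints to their parents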
while l <= r:
if l % 2 == 1:
lowercase__ : Union[str, Any] = self.st[l] if res is None else self.fn(SCREAMING_SNAKE_CASE , self.st[l] )
if r % 2 == 0:
lowercase__ : List[str] = self.st[r] if res is None else self.fn(SCREAMING_SNAKE_CASE , self.st[r] )
lowercase__ , lowercase__ : Optional[int] = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowerCAmelCase__ = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowerCAmelCase__ = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowerCAmelCase__ = SegmentTree(test_array, min)
lowerCAmelCase__ = SegmentTree(test_array, max)
lowerCAmelCase__ = SegmentTree(test_array, lambda a, b: a + b)
def __lowerCamelCase ( ):
"""simple docstring"""
for i in range(len(lowerCamelCase__ ) ):
for j in range(lowerCamelCase__ , len(lowerCamelCase__ ) ):
lowercase__ : str = reduce(lowerCamelCase__ , test_array[i : j + 1] )
lowercase__ : Union[str, Any] = reduce(lowerCamelCase__ , test_array[i : j + 1] )
lowercase__ : str = reduce(lambda lowerCamelCase__ , lowerCamelCase__ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(lowerCamelCase__ , lowerCamelCase__ )
assert max_range == max_segment_tree.query(lowerCamelCase__ , lowerCamelCase__ )
assert sum_range == sum_segment_tree.query(lowerCamelCase__ , lowerCamelCase__ )
test_all_segments()
for index, value in test_updates.items():
lowerCAmelCase__ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
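# illustrative example (assumed config value): attention_head_dim == 8 yields slice_size == 4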
lowercase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Union[str, Any] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
lowercase__ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
| 81 | 1 |
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[Any] = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowercase__ : str = n - k
# Calculate C(n,k)
for i in range(lowerCamelCase__ ):
result *= n - i
result //= i + 1
return result
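# worked example: binomial_coefficient(4, 2) == 6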
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return binomial_coefficient(2 * node_count , lowerCamelCase__ ) // (node_count + 1)
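# worked example: catalan_number(3) == binomial_coefficient(6, 3) // 4 == 20 // 4 == 5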
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if n < 0:
raise ValueError("factorial() not defined for negative values" )
lowercase__ : str = 1
for i in range(1 , n + 1 ):
result *= i
return result
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return catalan_number(lowerCamelCase__ ) * factorial(lowerCamelCase__ )
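# worked example: binary_tree_count(3) == catalan_number(3) * factorial(3) == 5 * 6 == 30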
if __name__ == "__main__":
lowerCAmelCase__ = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 81 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
lowercase__ : str = True if "large" in model_name or "huge" in model_name else False
lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : int = [3, 3, 3, 3]
lowercase__ : Tuple = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : Optional[Any] = [4, 4, 4, 4]
lowercase__ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
else:
lowercase__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[Any] = 96
elif "small" in model_name:
lowercase__ : List[str] = 96
elif "base" in model_name:
lowercase__ : str = 128
elif "large" in model_name:
lowercase__ : Any = 192
elif "xlarge" in model_name:
lowercase__ : str = 256
elif "huge" in model_name:
lowercase__ : List[str] = 352
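# illustrative trace (not part of the original file): "focalnet-tiny-lrf" resolves to
# embed_dim 96, depths [2, 2, 6, 2], focal_levels [3, 3, 3, 3] and focal_windows [3, 3, 3, 3]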
# set label information
lowercase__ : Tuple = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__ : List[Any] = "imagenet-22k-id2label.json"
else:
lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : str = FocalNetConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
return config
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
lowercase__ : List[Any] = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
lowercase__ : Union[str, Any] = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase__ )
lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
lowercase__ : List[str] = val
lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase__ )
# verify conversion
lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int = BitImageProcessor(
do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
lowercase__ : Any = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
lowercase__ : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """informer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ):
# time series specific configuration
lowercase__ : Any = prediction_length
lowercase__ : List[str] = context_length or prediction_length
lowercase__ : Tuple = distribution_output
lowercase__ : Union[str, Any] = loss
lowercase__ : Union[str, Any] = input_size
lowercase__ : List[str] = num_time_features
lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase__ : List[str] = scaling
lowercase__ : str = num_dynamic_real_features
lowercase__ : Tuple = num_static_real_features
lowercase__ : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Dict = cardinality
else:
lowercase__ : Dict = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Union[str, Any] = embedding_dimension
else:
lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ : Dict = num_parallel_samples
# Transformer architecture configuration
lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ : Optional[Any] = d_model
lowercase__ : int = encoder_attention_heads
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : Tuple = decoder_layers
lowercase__ : Union[str, Any] = dropout
lowercase__ : List[Any] = attention_dropout
lowercase__ : str = activation_dropout
lowercase__ : int = encoder_layerdrop
lowercase__ : Union[str, Any] = decoder_layerdrop
lowercase__ : Tuple = activation_function
lowercase__ : str = init_std
lowercase__ : Tuple = use_cache
# Informer
lowercase__ : Union[str, Any] = attention_type
lowercase__ : Union[str, Any] = sampling_factor
lowercase__ : Tuple = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
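# illustrative arithmetic (assumed values): embedding_dimension [2], 3 dynamic real features,
# 4 time features, 1 static real feature and input_size 1 give 2 + 3 + 4 + 1 + 2 = 12 features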
| 81 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 81 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
else:
lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
lowercase__ : str = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowercase__ : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowercase__ : Tuple = prophet
lowercase__ : Tuple = prophet_old
else:
lowercase__ : Tuple = prophet.prophetnet
lowercase__ : List[str] = prophet_old.model
lowercase__ : int = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : int = mapping[attribute]
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowercase__ : Dict = attribute
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowercase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowercase__ : str = True
break
elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase__ : str = model[int(lowerCamelCase__ )]
lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
else:
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_attribute == "":
lowercase__ : str = old_model
else:
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 81 | 1 |
import qiskit
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
lowercase__ : int = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
lowercase__ : Union[str, Any] = qiskit.execute(lowerCamelCase__ , lowerCamelCase__ , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase__ )
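# since both qubits are flipped from |0> by the X gates, all 1_000 shots should land on state '11'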
if __name__ == "__main__":
lowerCAmelCase__ = single_qubit_measure(2, 2)
print(f'''Total count for various states are: {counts}''')
| 81 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def snake_case ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = "lower newer"
lowercase__ : Optional[Any] = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokens + [tokenizer.unk_token]
lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
# Testing tokenization
lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : Dict = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Any ):
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 1 |
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCAmelCase__ = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
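# (each model's tuned length_penalty is taken from the table below and written into its generated config.json later in this script)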
lowerCAmelCase__ = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
lowerCAmelCase__ = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowerCAmelCase__ = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCAmelCase__ = '''allenai'''
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Union[str, Any] = dict((re.sub(R"@@$" , "" , lowerCamelCase__ ), v) if k.endswith("@@" ) else (re.sub(R"$" , "</w>" , lowerCamelCase__ ), v) for k, v in d.items() )
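# illustrative example (assumed input): {"hell@@": 7, "o": 9} becomes {"hell": 7, "o</w>": 9};
# trailing BPE "@@" continuation markers are dropped and word-final tokens gain "</w>"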
lowercase__ : Optional[Any] = "<s> <pad> </s> <unk>".split()
# restore the special tokens
for k in keep_keys:
del da[F"""{k}</w>"""]
lowercase__ : int = d[k] # restore
return da
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
assert os.path.exists(lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
print(F"""Writing results to {pytorch_dump_folder_path}""" )
# handle various types of models
lowercase__ : List[str] = basename(lowerCamelCase__ )
lowercase__ : Tuple = dirname(lowerCamelCase__ )
lowercase__ : List[Any] = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowercase__ : List[Any] = cls.hub_models()
lowercase__ : List[Any] = {"bpe": "fastbpe", "tokenizer": "moses"}
lowercase__ : int = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F"""using checkpoint {checkpoint_file}""" )
lowercase__ : Optional[int] = hub_utils.from_pretrained(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , archive_map=lowerCamelCase__ , **lowerCamelCase__ )
lowercase__ : List[Any] = vars(chkpt["args"]["model"] )
lowercase__ : Tuple = args["source_lang"]
lowercase__ : Optional[int] = args["target_lang"]
lowercase__ : List[Any] = dirname(lowerCamelCase__ )
lowercase__ : Any = basename(lowerCamelCase__ )
# dicts
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dict.{src_lang}.txt""" )
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dict.{tgt_lang}.txt""" )
lowercase__ : Any = Dictionary.load(lowerCamelCase__ )
lowercase__ : Dict = rewrite_dict_keys(src_dict.indices )
lowercase__ : str = len(lowerCamelCase__ )
lowercase__ : str = os.path.join(lowerCamelCase__ , "vocab-src.json" )
print(F"""Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records""" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCamelCase__ , ensure_ascii=lowerCamelCase__ , indent=lowerCamelCase__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowercase__ : int = True
for k in src_vocab.keys():
if not k.islower():
lowercase__ : Any = False
break
lowercase__ : Dict = Dictionary.load(lowerCamelCase__ )
lowercase__ : Any = rewrite_dict_keys(tgt_dict.indices )
lowercase__ : Optional[int] = len(lowerCamelCase__ )
lowercase__ : Optional[Any] = os.path.join(lowerCamelCase__ , "vocab-tgt.json" )
print(F"""Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records""" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCamelCase__ , ensure_ascii=lowerCamelCase__ , indent=lowerCamelCase__ ) )
# merges_file (bpecodes)
lowercase__ : Optional[int] = os.path.join(lowerCamelCase__ , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowercase__ : Dict = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if os.path.exists(lowerCamelCase__ ):
break
with open(lowerCamelCase__ , encoding="utf-8" ) as fin:
lowercase__ : str = fin.read()
lowercase__ : Optional[int] = re.sub(R" \d+$" , "" , lowerCamelCase__ , 0 , re.M ) # remove frequency number
print(F"""Generating {merges_file}""" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as fout:
fout.write(lowerCamelCase__ )
# model config
lowercase__ : List[str] = os.path.join(lowerCamelCase__ , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F"""need to extend tokenizer to support bpe={args["bpe"]}"""
assert args["tokenizer"] == "moses", F"""need to extend tokenizer to support tokenizer={args["tokenizer"]}"""
lowercase__ : List[str] = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
lowercase__ : Dict = 5
lowercase__ : str = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowercase__ : List[Any] = best_score_hparams[model_dir]["length_penalty"]
else:
lowercase__ : List[str] = 1.0
print(F"""Generating {fsmt_model_config_file}""" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCamelCase__ , ensure_ascii=lowerCamelCase__ , indent=lowerCamelCase__ ) )
# tokenizer config
lowercase__ : List[Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : int = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1_024,
"do_lower_case": do_lower_case,
}
print(F"""Generating {fsmt_tokenizer_config_file}""" )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(lowerCamelCase__ , ensure_ascii=lowerCamelCase__ , indent=lowerCamelCase__ ) )
# model
lowercase__ : Tuple = chkpt["models"][0]
lowercase__ : str = model.state_dict()
# rename keys to start with 'model.'
lowercase__ : Optional[Any] = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowercase__ : Tuple = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(lowerCamelCase__ , lowerCamelCase__ )
lowercase__ : List[str] = FSMTConfig.from_pretrained(lowerCamelCase__ )
lowercase__ : List[str] = FSMTForConditionalGeneration(lowerCamelCase__ )
# check that it loads ok
model_new.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
# save
lowercase__ : Union[str, Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
print(F"""Generating {pytorch_weights_dump_path}""" )
torch.save(lowerCamelCase__ , lowerCamelCase__ )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(F"""cd {data_root}""" )
print(F"""transformers-cli upload {model_dir}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
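# Hypothetical invocation sketch for the conversion script above; the paths are
# illustrative placeholders, not real checkpoints, and the dump dir is assumed to
# also hold the fairseq dict.{src,tgt} files and bpecodes next to the checkpoint:
#
#   python convert_fsmt_checkpoint.py \
#     --fsmt_checkpoint_path data/wmt19-en-de/checkpoint.pt \
#     --pytorch_dump_folder_path data/wmt19-en-de-hf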
| 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
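# Sketch of what the lazy __init__ above buys (assuming the standard transformers
# package layout; the names below resolve only when the matching framework is installed):
#
#   from transformers import TapasConfig, TapasTokenizer  # cheap, no torch/tf import yet
#   from transformers import TapasForQuestionAnswering    # torch is imported on first access
#
# _LazyModule resolves entries of the import structure only when an attribute is
# actually touched, keeping `import transformers` itself fast.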
| 81 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
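# e.g. with the defaults above: num_patches = (30 // 2) ** 2 = 225 and
# mask_ratio = 0.6, so seq_length = ceil(0.4 * (225 + 1)) = 91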
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
lowercase__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : str = outputs.last_hidden_state.numpy()
lowercase__ : Optional[Any] = 0
else:
lowercase__ : Optional[Any] = outputs.logits.numpy()
lowercase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy()
lowercase__ : Optional[int] = 0
else:
lowercase__ : str = after_outputs["logits"].numpy()
lowercase__ : Tuple = 0
lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_b ) ) # max deviation between pre-save and post-save outputs
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
| 81 | 1 |
import functools
def __lowerCamelCase ( worda , wordb ):
"""simple docstring"""
len_worda : int = len(worda )
len_wordb : int = len(wordb )
@functools.cache
def min_distance(indexa , indexb ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_wordb - indexb
# if second word index is overflow - delete all from the first word
if indexb >= len_wordb:
return len_worda - indexa
diff : int = int(worda[indexa] != wordb[indexb] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , indexb ) , 1 + min_distance(indexa , indexb + 1 ) , diff + min_distance(indexa + 1 , indexb + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
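# Quick sanity checks for the memoized edit-distance function above
# (classic Levenshtein examples with well-known expected values):
print(__lowerCamelCase("kitten", "sitting")) # -> 3
print(__lowerCamelCase("intention", "execution")) # -> 5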
| 81 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """esm"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : Optional[int] = emb_layer_norm_before
lowercase__ : List[str] = token_dropout
lowercase__ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : List[str] = get_default_vocab_list()
else:
lowercase__ : List[Any] = vocab_list
else:
lowercase__ : List[Any] = None
lowercase__ : List[str] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def snake_case ( self : List[str] ):
lowercase__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Optional[int] ):
if self.trunk is None:
lowercase__ : Dict = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE ):
lowercase__ : int = TrunkConfig(**self.trunk )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Any = self.trunk.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 4_8
lowercase_ = 1_0_2_4
lowercase_ = 1_2_8
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 0
lowercase_ = 0
lowercase_ = False
lowercase_ = 4
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Dict ):
if self.structure_module is None:
lowercase__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
lowercase__ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
lowercase__ : List[Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def snake_case ( self : Optional[Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 3_8_4
lowercase_ = 1_2_8
lowercase_ = 1_6
lowercase_ = 1_2_8
lowercase_ = 1_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 1_0
lowercase_ = 1e-8
lowercase_ = 1e5
def snake_case ( self : Dict ):
return asdict(self )
def __lowerCamelCase ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
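# Sketch: turning the 33-token default vocabulary above into a token -> id
# lookup (the odd function name is the obfuscated one defined in this file):
#
#   token_to_id = {tok: i for i, tok in enumerate(__lowerCamelCase())}
#   assert token_to_id["<cls>"] == 0 and token_to_id["<mask>"] == 32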
| 81 | 1 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=13 , SCREAMING_SNAKE_CASE : Dict=[30, 30] , SCREAMING_SNAKE_CASE : Dict=2 , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : Any=32 , SCREAMING_SNAKE_CASE : Union[str, Any]=5 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Any=37 , SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : str=10 , SCREAMING_SNAKE_CASE : str=0.02 , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : List[Any]=8 , SCREAMING_SNAKE_CASE : Optional[Any]=10 , ):
lowercase__ : Tuple = parent
lowercase__ : Any = batch_size
lowercase__ : Any = image_size
lowercase__ : str = patch_size
lowercase__ : int = num_channels
lowercase__ : Dict = is_training
lowercase__ : Dict = use_labels
lowercase__ : Optional[int] = hidden_size
lowercase__ : Dict = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Optional[int] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : Optional[Any] = type_sequence_label_size
lowercase__ : List[Any] = initializer_range
lowercase__ : List[str] = num_labels
lowercase__ : Optional[Any] = scope
lowercase__ : List[Any] = n_targets
lowercase__ : Optional[int] = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
lowercase__ : List[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size)
lowercase__ : Any = num_patches + 1 + self.num_detection_tokens
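# e.g. with the defaults above: (30 // 2) * (30 // 2) = 225 patches, so
# expected_seq_len = 225 + 1 + 10 = 236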
def snake_case ( self : Optional[Any] ):
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
lowercase__ : int = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
lowercase__ : Union[str, Any] = []
for i in range(self.batch_size ):
lowercase__ : Dict = {}
lowercase__ : List[Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = torch.rand(self.n_targets , 4 , device=SCREAMING_SNAKE_CASE )
labels.append(SCREAMING_SNAKE_CASE )
lowercase__ : int = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = YolosModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : List[str] = YolosForObjectDetection(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(pixel_values=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
lowercase__ : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def snake_case ( self : Dict ):
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs
lowercase__ : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any=False ):
lowercase__ : int = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
lowercase__ : Optional[int] = []
for i in range(self.model_tester.batch_size ):
lowercase__ : int = {}
lowercase__ : Union[str, Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=SCREAMING_SNAKE_CASE , dtype=torch.long )
lowercase__ : List[str] = torch.ones(
self.model_tester.n_targets , 4 , device=SCREAMING_SNAKE_CASE , dtype=torch.float )
labels.append(SCREAMING_SNAKE_CASE )
lowercase__ : str = labels
return inputs_dict
def snake_case ( self : Any ):
lowercase__ : Optional[Any] = YolosModelTester(self )
lowercase__ : Dict = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : int ):
self.config_tester.run_common_tests()
def snake_case ( self : List[str] ):
# YOLOS does not use inputs_embeds
pass
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : str ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Any = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = True
# in YOLOS, the seq_len is different
lowercase__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
lowercase__ : Optional[Any] = False
lowercase__ : Optional[int] = True
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Any = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[str] = True
lowercase__ : int = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : List[Any] = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
lowercase__ : Any = len(SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Optional[Any] = True
lowercase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : Any = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def snake_case ( self : Dict ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : str = outputs.hidden_states
lowercase__ : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# YOLOS has a different seq_length
lowercase__ : Union[str, Any] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : int ):
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Any ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = YolosModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Tuple ):
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def snake_case ( self : List[str] ):
lowercase__ : Union[str, Any] = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Optional[int] = model(inputs.pixel_values )
# verify outputs
lowercase__ : Dict = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Any = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=SCREAMING_SNAKE_CASE , )
lowercase__ : Union[str, Any] = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] , device=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify postprocessing
lowercase__ : Any = image_processor.post_process_object_detection(
SCREAMING_SNAKE_CASE , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
lowercase__ : str = torch.tensor([0.9_994, 0.9_790, 0.9_964, 0.9_972, 0.9_861] ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = [75, 75, 17, 63, 17]
lowercase__ : Dict = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(SCREAMING_SNAKE_CASE )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(results["boxes"][0, :] , SCREAMING_SNAKE_CASE ) )
| 81 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """deformable_detr"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : List[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
def snake_case ( self : str ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
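# Hypothetical instantiation sketch (the names follow the DeformableDetrConfig
# this obfuscated file mirrors; treat them as assumptions, not guaranteed API):
#
#   config = DeformableDetrConfig(two_stage=True, with_box_refine=True)
#   config.num_attention_heads == config.encoder_attention_heads  # via attribute_map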
| 81 | 1 |
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = current_set.copy()
for row_index, row in enumerate(lowerCamelCase__ ):
lowercase__ : Any = row[0]
for column_index, column in enumerate(lowerCamelCase__ ):
if magnitude == 0:
lowercase__ : Optional[Any] = column
continue
lowercase__ : Any = column / magnitude
# Subtract to cancel term
lowercase__ : List[Any] = current_set[0]
lowercase__ : str = [first_row]
lowercase__ : Dict = current_set[1::]
for row in current_set:
lowercase__ : List[Any] = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCamelCase__ )
continue
for column_index in range(len(lowerCamelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCamelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase__ : Any = final_set[0]
lowercase__ : List[Any] = []
lowercase__ : Optional[int] = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase__ : Optional[Any] = simplify(lowerCamelCase__ )
for i in range(len(lowerCamelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCamelCase__ )
lowercase__ : Optional[Any] = resultant
return final_set
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if len(lowerCamelCase__ ) == 0:
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
lowercase__ : Union[str, Any] = len(lowerCamelCase__ ) + 1
if any(len(lowerCamelCase__ ) != _length for item in equations ):
raise IndexError("solve_simultaneous() requires n lists of length n+1" )
for row in equations:
if any(not isinstance(lowerCamelCase__ , (int, float) ) for column in row ):
raise ValueError("solve_simultaneous() requires lists of integers" )
if len(lowerCamelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase__ : int = equations.copy()
if any(0 in row for row in data_set ):
lowercase__ : List[str] = data_set.copy()
lowercase__ : List[str] = []
for row_index, row in enumerate(lowerCamelCase__ ):
if 0 not in row:
lowercase__ : int = data_set.pop(lowerCamelCase__ )
break
if not full_row:
raise ValueError("solve_simultaneous() requires at least 1 full equation" )
data_set.insert(0 , lowerCamelCase__ )
lowercase__ : Optional[Any] = data_set.copy()
lowercase__ : Dict = simplify(lowerCamelCase__ )
lowercase__ : Dict = simplified[::-1]
lowercase__ : list = []
for row in simplified:
lowercase__ : int = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase__ : Optional[Any] = row.copy()[: len(lowerCamelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCamelCase__ ) == 0:
solutions.append(0 )
continue
lowercase__ : Tuple = temp_row[1::]
lowercase__ : Any = temp_row[::-1]
for column_index, column in enumerate(lowerCamelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCamelCase__ )
lowercase__ : List[Any] = []
for item in solutions:
final.append(float(round(lowerCamelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
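# Worked check for the demo system above: every equation reads sum(x) + x_i = b_i,
# so summing all five gives 6 * sum(x) = 30, hence sum(x) = 5 and x_i = b_i - 5.
# Expected printed solutions (assuming the elimination solver is correct):
#   [-1.0, 0.0, 1.0, 2.0, 3.0]
#   [0.5]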
| 81 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__ : List[Any] = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
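# Numeric sketch of the symmetric-pad sizing used by the `pad` step above:
# with pad size 8 and input height 13, (13 // 8 + 1) * 8 - 13 = 3, so the padded
# height is 16. Note the formula as written adds a full extra block when the
# dimension is already a multiple of 8 (e.g. height 16 pads to 24).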
| 81 | 1 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
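# e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
# num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91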
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
lowercase__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
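# one uniform noise value per patch: ViTMAE argsorts this noise to decide which patches to
# mask, so passing a fixed noise tensor makes the otherwise random masking reproducible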
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : str = outputs.last_hidden_state.numpy()
lowercase__ : Optional[Any] = 0
else:
# for TFViTMAEForPreTraining, compare the reconstruction logits instead
lowercase__ : Optional[Any] = outputs.logits.numpy()
lowercase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy()
lowercase__ : Optional[int] = 0
else:
lowercase__ : str = after_outputs["logits"].numpy()
lowercase__ : Tuple = 0
# maximal absolute difference between the outputs captured before saving and after reloading
lowercase__ : Optional[Any] = np.amax(np.abs(out_before - out_after ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
| 81 |
import argparse
import json
from tqdm import tqdm
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , )
lowercase__ : Dict = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
lowercase__ : List[str] = json.load(lowerCamelCase__ )
for dpr_record in tqdm(lowerCamelCase__ ):
lowercase__ : Any = dpr_record["question"]
lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(lowerCamelCase__ ) + "\n" )
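# The evaluation set ends up with one question per line, and the gold file with the
# tab-joined titles of that question's positive contexts on the corresponding line.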
if __name__ == "__main__":
main()
| 81 | 1 |
def __lowerCamelCase ( lowerCamelCase__ = 1_000_000 ):
"""simple docstring"""
lowercase__ : str = set(range(3 , lowerCamelCase__ , 2 ) )
primes.add(2 )
for p in range(3 , lowerCamelCase__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowerCamelCase__ , lowerCamelCase__ ) ) )
lowercase__ : List[Any] = [float(lowerCamelCase__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowerCamelCase__ , limit + 1 , lowerCamelCase__ ):
phi[n] *= 1 - 1 / p
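# e.g. n = 10: phi[10] starts at 10.0, is scaled by (1 - 1/2) at p = 2 and by (1 - 1/5)
# at p = 5, ending at 4.0 = phi(10)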
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 81 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
lowercase__ : Optional[int] = parser.parse_args()
return args
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def fn(lowerCamelCase__ ):
return tokenizer(examples["text"] )
return fn
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = []
for i in range(len(tokenized_data["input_ids"] ) ):
lowercase__ : str = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ )
lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ )
lowercase__ : str = example.SerializeToString()
records.append(lowerCamelCase__ )
return records
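# A minimal sketch of reading the shards back, assuming the file name pattern used below
# and a fixed max_length of 512:
# raw = tf.data.TFRecordDataset(["train/dataset-0-1000.tfrecord"])
# spec = {
#     "input_ids": tf.io.FixedLenFeature([512], tf.int64),
#     "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
# }
# parsed = raw.map(lambda record: tf.io.parse_single_example(record, spec))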
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit )
lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
lowercase__ : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ : str = tokenize_function(lowerCamelCase__ )
lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowerCamelCase__ ):
# Concatenate all texts.
lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ : List[str] = (total_length // args.max_length) * args.max_length
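# e.g. total_length = 1_234 with max_length = 512 keeps (1_234 // 512) * 512 = 1_024
# tokens (two full chunks) and drops the trailing 210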
# Split by chunks of max_len.
lowercase__ : Optional[int] = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 )
lowercase__ : str = 0
lowercase__ : str = 0
for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ):
lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size]
lowercase__ : str = len(dataset_snapshot["input_ids"] )
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ )
with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file:
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Optional[int] = serialized_examples[i]
out_file.write(lowerCamelCase__ )
print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
| 81 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
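# e.g. 1_000 training timesteps with num_inference_steps = 50 gives step_ratio = 20 and
# timesteps = [980, 960, ..., 20, 0]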
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
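# i.e. beta_tilde_t = (1 - alphabar_{t-1}) / (1 - alphabar_t) * beta_t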
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
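# i.e. mu_t = (sqrt(alphabar_{t-1}) * beta_t / (1 - alphabar_t)) * x_0
#           + (sqrt(alpha_t) * (1 - alphabar_{t-1}) / (1 - alphabar_t)) * x_t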
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
| 81 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = ConvNextVaModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
if model_class.__name__ in [
*get_values(SCREAMING_SNAKE_CASE ),
*get_values(SCREAMING_SNAKE_CASE ),
]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
if (
model_class.__name__
in [*get_values(SCREAMING_SNAKE_CASE ), *get_values(SCREAMING_SNAKE_CASE )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 | 1 |
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(enum.Enum ):
"""simple docstring"""
lowercase_ = 0
lowercase_ = 1
@add_end_docstrings(_UpperCamelCase )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """generated"""
def __init__( self : int , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : int=None , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : str=None , **SCREAMING_SNAKE_CASE : Tuple , ):
lowercase__ : str = {}
if truncation is not None:
lowercase__ : Union[str, Any] = truncation
lowercase__ : Any = generate_kwargs
lowercase__ : Any = {}
if return_tensors is not None and return_type is None:
lowercase__ : Optional[int] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase__ : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ : Dict = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ : Any = self.tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
return True
def snake_case ( self : Tuple , *SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[Any] = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , SCREAMING_SNAKE_CASE ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
lowercase__ : Union[str, Any] = ([prefix + arg for arg in args[0]],)
lowercase__ : int = True
elif isinstance(args[0] , SCREAMING_SNAKE_CASE ):
lowercase__ : str = (prefix + args[0],)
lowercase__ : Optional[int] = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
lowercase__ : int = self.tokenizer(*SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : List[str] = super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if (
isinstance(args[0] , SCREAMING_SNAKE_CASE )
and all(isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for el in args[0] )
and all(len(SCREAMING_SNAKE_CASE ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def snake_case ( self : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict=TruncationStrategy.DO_NOT_TRUNCATE , **SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Optional[Any] = self._parse_and_tokenize(SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return inputs
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ):
if self.framework == "pt":
lowercase__ , lowercase__ : Optional[int] = model_inputs["input_ids"].shape
elif self.framework == "tf":
lowercase__ , lowercase__ : Optional[Any] = tf.shape(model_inputs["input_ids"] ).numpy()
lowercase__ : int = generate_kwargs.get("min_length" , self.model.config.min_length )
lowercase__ : Union[str, Any] = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(SCREAMING_SNAKE_CASE , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
lowercase__ : Any = self.model.generate(**SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = output_ids.shape[0]
if self.framework == "pt":
lowercase__ : str = output_ids.reshape(SCREAMING_SNAKE_CASE , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase__ : List[str] = tf.reshape(SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any]=ReturnType.TEXT , SCREAMING_SNAKE_CASE : List[str]=False ):
lowercase__ : int = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase__ : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
lowercase__ : Union[str, Any] = {
f"""{self.return_name}_text""": self.tokenizer.decode(
SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE , )
}
records.append(SCREAMING_SNAKE_CASE )
return records
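# e.g. for the default "generated" return name each record looks like
# {"generated_text": "..."} (or {"generated_token_ids": tensor} for ReturnType.TENSORS)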
@add_end_docstrings(_UpperCamelCase )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """summary"""
def __call__( self : Any , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCamelCase )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """translation"""
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def snake_case ( self : Dict , *SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
if getattr(self.tokenizer , "_build_translation_inputs" , SCREAMING_SNAKE_CASE ):
return self.tokenizer._build_translation_inputs(
*SCREAMING_SNAKE_CASE , return_tensors=self.framework , truncation=SCREAMING_SNAKE_CASE , src_lang=SCREAMING_SNAKE_CASE , tgt_lang=SCREAMING_SNAKE_CASE )
else:
return super()._parse_and_tokenize(*SCREAMING_SNAKE_CASE , truncation=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : List[str]=None , **SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ , lowercase__ , lowercase__ : int = super()._sanitize_parameters(**SCREAMING_SNAKE_CASE )
if src_lang is not None:
lowercase__ : Any = src_lang
if tgt_lang is not None:
lowercase__ : Optional[Any] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase__ : Dict = kwargs.get("task" , self.task )
lowercase__ : List[str] = task.split("_" )
if task and len(SCREAMING_SNAKE_CASE ) == 4:
# translation, XX, to YY
lowercase__ : str = items[1]
lowercase__ : Tuple = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return super().__call__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 81 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
lowercase__ : str = bertabert.config.encoder.vocab_size
lowercase__ : List[str] = tokenizer.sep_token_id
lowercase__ : Optional[Any] = tokenizer.cls_token_id
lowercase__ : int = 128
lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowercase__ : Tuple = train_dataset.select(range(32 ) )
lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
lowercase__ : int = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
# The tokenizer automatically adds the special tokens (for BERT: [CLS] <text> [SEP])
lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
lowercase__ : Tuple = inputs.input_ids
lowercase__ : Optional[int] = inputs.attention_mask
lowercase__ : int = outputs.input_ids
lowercase__ : Dict = outputs.input_ids.copy()
lowercase__ : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
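# -100 is the default ignore_index of the cross-entropy loss, so padded label
# positions do not contribute to the training loss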
lowercase__ : List[Any] = outputs.attention_mask
assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = pred.label_ids
lowercase__ : Dict = pred.predictions
# all unnecessary tokens are removed
lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
lowercase__ : List[str] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowercase__ : Any = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
lowercase__ : int = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ : str = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
| 81 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCAmelCase__ = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0_5_2_2, type=int)
lowerCAmelCase__ = parser.parse_args()
logger.info(f'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowerCAmelCase__ = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowerCAmelCase__ = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCAmelCase__ = [0] * args.vocab_size
for k, v in counter.items():
lowerCAmelCase__ = v
logger.info(f'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 81 |
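The counting script above reduces to a simple two-phase pass: accumulate token-id frequencies over the binarized corpus with a Counter, then materialize a dense vector of length vocab_size so ids that never occur keep a count of 0. A self-contained sketch under that reading (names are illustrative):

from collections import Counter

def token_counts(sequences, vocab_size):
    counter = Counter()
    for token_ids in sequences:
        counter.update(token_ids)
    # Dense vector indexed by token id; unseen ids stay at 0.
    counts = [0] * vocab_size
    for token_id, count in counter.items():
        counts[token_id] = count
    return counts

assert token_counts([[1, 1, 3]], vocab_size=5) == [0, 2, 0, 1, 0]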
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase__ : Tuple = 192
lowercase__ : List[Any] = 768
lowercase__ : Tuple = 12
lowercase__ : List[str] = 3
lowercase__ : List[Any] = [800, 1_333]
lowercase__ : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowercase__ : str = 330
lowercase__ : List[Any] = 14
lowercase__ : Tuple = 6
lowercase__ : Optional[int] = 1_320
elif "yolos_s" in yolos_name:
lowercase__ : Dict = 384
lowercase__ : str = 1_536
lowercase__ : List[Any] = 12
lowercase__ : List[Any] = 6
elif "yolos_b" in yolos_name:
lowercase__ : int = [800, 1_344]
lowercase__ : Tuple = 91
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : Optional[int] = "coco-detection-id2label.json"
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : List[Any] = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowercase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : str = in_proj_weight[-config.hidden_size :, :]
lowercase__ : Tuple = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "backbone" in name:
lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowercase__ : int = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowercase__ : Dict = key.split("." )
lowercase__ : List[Any] = int(key_split[2] )
lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase__ : str = val[:dim, :]
lowercase__ : int = val[
dim : dim * 2, :
]
lowercase__ : str = val[-dim:, :]
else:
lowercase__ : Tuple = val[:dim]
lowercase__ : Any = val[dim : dim * 2]
lowercase__ : Optional[Any] = val[-dim:]
else:
lowercase__ : Optional[Any] = val
return orig_state_dict
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512
lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" )
lowercase__ : int = model(**lowerCamelCase__ )
lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes
lowercase__ , lowercase__ : int = None, None
if yolos_name == "yolos_ti":
lowercase__ : Optional[int] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
lowercase__ : Dict = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
lowercase__ : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
lowercase__ : List[str] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
lowercase__ : Dict = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
lowercase__ : Tuple = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
lowercase__ : Optional[Any] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
lowercase__ : int = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
lowercase__ : List[str] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
lowercase__ : List[str] = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowercase__ : Tuple = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowercase__ : Optional[int] = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 1 |
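The qkv handling in the YOLOS converter above follows a common checkpoint-conversion pattern: timm stores the attention input projection as one fused (3 * hidden, hidden) matrix whose rows are ordered query, key, value, and the converter slices the rows apart for the target model. A small sketch of that slicing, with a hypothetical helper name:

import torch

def split_qkv(in_proj_weight: torch.Tensor, hidden_size: int):
    # Rows 0:h are query, h:2h are key, and the last h rows are value.
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : hidden_size * 2, :]
    v = in_proj_weight[-hidden_size:, :]
    return q, k, v

q, k, v = split_qkv(torch.randn(3 * 4, 4), hidden_size=4)
assert q.shape == k.shape == v.shape == (4, 4)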
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
super().__init__()
lowercase__ : str = layers_per_block
lowercase__ : int = torch.nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = nn.ModuleList([] )
# down
lowercase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = output_channel
lowercase__ : Dict = block_out_channels[i]
lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Union[str, Any] = get_down_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
lowercase__ : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# out
lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : Tuple = False
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = x
lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# middle
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
# middle
lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
# post-process
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
super().__init__()
lowercase__ : List[str] = layers_per_block
lowercase__ : int = nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Optional[Any] = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# up
lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = output_channel
lowercase__ : List[Any] = reversed_block_out_channels[i]
lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Dict = get_up_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : List[Any] = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
lowercase__ : Tuple = z
lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
super().__init__()
lowercase__ : List[Any] = n_e
lowercase__ : List[str] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : List[str] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Tuple = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : Union[str, Any] = sane_index_shape
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : List[Any] = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : int = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
return back.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
lowercase__ : Dict = None
lowercase__ : int = None
# compute loss for embedding
if not self.legacy:
lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
if shape is not None:
lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
# make sure the sample is on the same device as the parameters and has the same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : str = self.mean + self.std * sample
return x
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
| 81 |
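One line in the vector quantizer above deserves a gloss: `z + (z_q - z).detach()` is the straight-through estimator. The forward pass returns the quantized codes, but the backward pass treats quantization as the identity, so gradients reach the encoder. A minimal demonstration of that trick:

import torch

def straight_through(z: torch.Tensor, z_q: torch.Tensor) -> torch.Tensor:
    # Forward value equals z_q; gradient w.r.t. z is 1 (quantization is bypassed).
    return z + (z_q - z).detach()

z = torch.randn(3, requires_grad=True)
out = straight_through(z, torch.round(z))
out.sum().backward()
assert torch.allclose(z.grad, torch.ones_like(z))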
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 1 |
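The try/except structure in the init module above is the standard gate for torch-only symbols: when torch is unavailable the model classes are simply never registered, and the same check is replayed under TYPE_CHECKING so static analysis sees the real imports. A condensed sketch of the runtime half, with hypothetical module names:

_import_structure = {"configuration_foo": ["FooConfig"]}
try:
    import torch  # noqa: F401
except ImportError:
    # torch missing: the modeling symbols are simply not exposed.
    pass
else:
    _import_structure["modeling_foo"] = ["FooModel"]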
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__ : List[Any] = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
| 81 |
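The pad step in the image processor above rounds each spatial dimension up to a multiple of `size` using `(old // size + 1) * size - old`; note that this formula always adds at least one full block, even when the dimension is already an exact multiple. A quick check of that behavior:

def pad_amounts(old_height, old_width, size):
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return pad_height, pad_width

# 16 is already a multiple of 8, yet still receives a full extra block.
assert pad_amounts(17, 16, 8) == (7, 8)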
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 81 | 1 |
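The processor tests above all start from the same fixture: a toy BPE vocab and merges file written to a temporary directory, so both the slow and fast tokenizers can be loaded from one path. A stripped-down version of that setup (toy data; filenames are illustrative):

import json
import os
import tempfile

tmpdir = tempfile.mkdtemp()
vocab = {tok: i for i, tok in enumerate(["l", "o", "w", "lo", "<unk>"])}
merges = ["#version: 0.2", "l o"]
with open(os.path.join(tmpdir, "vocab.json"), "w", encoding="utf-8") as fp:
    fp.write(json.dumps(vocab) + "\n")
with open(os.path.join(tmpdir, "merges.txt"), "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))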
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any=7 , SCREAMING_SNAKE_CASE : List[str]=3 , SCREAMING_SNAKE_CASE : List[str]=18 , SCREAMING_SNAKE_CASE : Optional[Any]=30 , SCREAMING_SNAKE_CASE : Union[str, Any]=400 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Any=[0.48_145_466, 0.4_578_275, 0.40_821_073] , SCREAMING_SNAKE_CASE : Optional[Any]=[0.26_862_954, 0.26_130_258, 0.27_577_711] , SCREAMING_SNAKE_CASE : int=True , ):
lowercase__ : Dict = size if size is not None else {"height": 224, "width": 224}
lowercase__ : List[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowercase__ : List[str] = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : List[Any] = num_channels
lowercase__ : Any = image_size
lowercase__ : List[str] = min_resolution
lowercase__ : Any = max_resolution
lowercase__ : List[Any] = do_resize
lowercase__ : Optional[Any] = size
lowercase__ : int = do_center_crop
lowercase__ : Optional[int] = crop_size
lowercase__ : str = do_normalize
lowercase__ : List[str] = image_mean
lowercase__ : Any = image_std
lowercase__ : Optional[Any] = do_convert_rgb
def snake_case ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Tuple=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
lowercase__ : Tuple = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
lowercase__ : Tuple = []
for i in range(self.batch_size ):
lowercase__ , lowercase__ : List[Any] = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(255 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
lowercase__ : Optional[int] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
if torchify:
lowercase__ : Optional[Any] = [torch.from_numpy(SCREAMING_SNAKE_CASE ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = ChineseCLIPImageProcessor if is_vision_available() else None
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = ChineseCLIPImageProcessingTester(self , do_center_crop=SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_convert_rgb" ) )
def snake_case ( self : Any ):
lowercase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 224, "width": 224} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowercase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def snake_case ( self : List[str] ):
pass
def snake_case ( self : Tuple ):
# Initialize image_processing
lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : int = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowercase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : int = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case ( self : Dict ):
# Initialize image_processing
lowercase__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Tuple = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowercase__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : Dict = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case ( self : str ):
# Initialize image_processing
lowercase__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : List[Any] = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowercase__ : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : List[str] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
@require_torch
@require_vision
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = ChineseCLIPImageProcessor if is_vision_available() else None
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = 3
@property
def snake_case ( self : Tuple ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_convert_rgb" ) )
def snake_case ( self : int ):
pass
def snake_case ( self : Optional[Any] ):
# Initialize image_processing
lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : Any = self.image_processor_tester.prepare_inputs(equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowercase__ : Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : Optional[int] = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 81 |
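The `prepare_inputs` helper above produces random images in one of three containers (PIL, numpy, torch); for the PIL branch, the channels-first array must be moved to channels-last before `Image.fromarray`. A minimal reproduction of that conversion:

import numpy as np
from PIL import Image

arr = np.random.randint(255, size=(3, 30, 40), dtype=np.uint8)  # C, H, W
pil_image = Image.fromarray(np.moveaxis(arr, 0, -1))            # H, W, C
assert pil_image.size == (40, 30)  # PIL reports (width, height)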
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : int = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = -1
lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer.decode(greedy_ids[0] )
lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
lowercase__ : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : Optional[Any] = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = -1
lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will time out after 0.001 seconds, so an exception will be raised
with self.assertRaises(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 81 | 1 |
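The iterator-streamer tests above rely on one pattern: `model.generate` runs in a worker thread while the main thread drains the streamer, which is essentially a queue of decoded text chunks. A skeleton of that pattern (the model, streamer, and generation kwargs are assumed to exist and are not constructed here):

from threading import Thread

def stream_generation(model, streamer, generation_kwargs):
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()
    text = ""
    for chunk in streamer:  # blocks until new tokens arrive or the timeout fires
        text += chunk
    thread.join()
    return text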
from __future__ import annotations
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if len(lowerCamelCase__ ) == 0:
return array
lowercase__ , lowercase__ : Tuple = min(lowerCamelCase__ ), max(lowerCamelCase__ )
# Compute the number of holes needed to cover the full value range
lowercase__ : str = _max - _min + 1
lowercase__ , lowercase__ : Dict = [0] * holes_range, [0] * holes_range
# Distribute the values into holes, tracking how often each occurs.
for i in array:
lowercase__ : int = i - _min
lowercase__ : Optional[int] = i
holes_repeat[index] += 1
# Rebuild the array in sorted order from the holes.
lowercase__ : Optional[int] = 0
for i in range(lowerCamelCase__ ):
while holes_repeat[i] > 0:
lowercase__ : List[Any] = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase__ = input('''Enter numbers separated by comma:\n''')
lowerCAmelCase__ = [int(x) for x in user_input.split(''',''')]
print(pigeon_sort(unsorted))
| 81 |
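The function above is pigeonhole sort: O(n + range) time and O(range) extra space, so it only pays off when max - min is small relative to n. Since the values are plain integers, the `holes`/`holes_repeat` pair can be collapsed into a single count array, as in this sketch:

def pigeonhole_sort(array: list[int]) -> list[int]:
    if not array:
        return array
    lo, hi = min(array), max(array)
    # One counter per possible value in [lo, hi].
    counts = [0] * (hi - lo + 1)
    for value in array:
        counts[value - lo] += 1
    out = []
    for offset, count in enumerate(counts):
        out.extend([lo + offset] * count)
    return out

assert pigeonhole_sort([8, 3, 2, 7, 4, 6, 8]) == [2, 3, 4, 6, 7, 8, 8]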
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
super().__init__()
lowercase__ : str = layers_per_block
lowercase__ : int = torch.nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = nn.ModuleList([] )
# down
lowercase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = output_channel
lowercase__ : Dict = block_out_channels[i]
lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Union[str, Any] = get_down_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
lowercase__ : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# out
lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : Tuple = False
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = x
lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# middle
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
# middle
lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
# post-process
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
super().__init__()
lowercase__ : List[str] = layers_per_block
lowercase__ : int = nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Optional[Any] = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# up
lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = output_channel
lowercase__ : List[Any] = reversed_block_out_channels[i]
lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Dict = get_up_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : List[Any] = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
lowercase__ : Tuple = z
lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
super().__init__()
lowercase__ : List[Any] = n_e
lowercase__ : List[str] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : List[str] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Tuple = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : Union[str, Any] = sane_index_shape
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : List[Any] = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : int = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
return back.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
lowercase__ : Dict = None
lowercase__ : int = None
# compute loss for embedding
if not self.legacy:
lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
lowercase__ : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
if shape is not None:
lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
# make sure sample is on the same device as the parameters and has same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
lowercase__ : str = self.mean + self.std * sample
return x
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
| 81 | 1 |
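As a readability aid, here is a compact sketch of the reparameterized sampling and KL divergence implemented by the diagonal-Gaussian distribution class directly above; the function names are mine, but the formulas match the code:

import torch

def sample_diag_gaussian(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    # Reparameterization trick: x = mean + std * eps with eps ~ N(0, I).
    logvar = torch.clamp(logvar, -30.0, 20.0)
    std = torch.exp(0.5 * logvar)
    return mean + std * torch.randn_like(mean)

def kl_to_standard_normal(mean: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
    # KL(N(mean, var) || N(0, I)), summed over the non-batch dimensions,
    # matching the `other is None` branch of the kl() method above.
    var = torch.exp(logvar)
    return 0.5 * torch.sum(mean.pow(2) + var - 1.0 - logvar, dim=[1, 2, 3])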
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class snake_case__:
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , ):
lowercase__ : Dict = parent
lowercase__ : Union[str, Any] = 13
lowercase__ : int = 7
lowercase__ : Any = 30
lowercase__ : int = self.seq_length + self.mem_len
lowercase__ : List[Any] = 15
lowercase__ : List[str] = True
lowercase__ : Optional[Any] = True
lowercase__ : Optional[int] = 99
lowercase__ : int = [10, 50, 80]
lowercase__ : int = 32
lowercase__ : List[Any] = 32
lowercase__ : Union[str, Any] = 4
lowercase__ : str = 8
lowercase__ : Dict = 128
lowercase__ : List[str] = 2
lowercase__ : Any = 2
lowercase__ : Optional[Any] = None
lowercase__ : str = 1
lowercase__ : Optional[Any] = 0
lowercase__ : Tuple = 3
lowercase__ : Any = self.vocab_size - 1
lowercase__ : str = 0.01
def snake_case ( self : List[Any] ):
lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : List[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Tuple = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def snake_case ( self : Union[str, Any] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[str] = TFTransfoXLModel(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ : Any = model(SCREAMING_SNAKE_CASE ).to_tuple()
lowercase__ : Union[str, Any] = {"input_ids": input_ids_a, "mems": mems_a}
lowercase__ , lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : List[Any] = TFTransfoXLLMHeadModel(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE ).to_tuple()
lowercase__ : str = {"input_ids": input_ids_a, "labels": lm_labels}
lowercase__ , lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE ).to_tuple()
lowercase__ , lowercase__ : int = model([input_ids_a, mems_a] ).to_tuple()
lowercase__ : Union[str, Any] = {"input_ids": input_ids_a, "mems": mems_a, "labels": lm_labels}
lowercase__ , lowercase__ : str = model(SCREAMING_SNAKE_CASE ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = TFTransfoXLForSequenceClassification(SCREAMING_SNAKE_CASE )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : List[Any] ):
lowercase__ : Dict = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) : Union[str, Any] = config_and_inputs
lowercase__ : Tuple = {"input_ids": input_ids_a}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowercase_ = () if is_tf_available() else ()
lowercase_ = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def snake_case ( self : int ):
lowercase__ : Any = TFTransfoXLModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , d_embed=37 )
def snake_case ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : List[str] ):
self.model_tester.set_seed()
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
self.model_tester.set_seed()
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowercase__ : List[str] = model.get_output_embeddings()
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer )
lowercase__ : List[str] = model.get_bias()
assert name is None
else:
lowercase__ : int = model.get_output_embeddings()
assert x is None
lowercase__ : Optional[int] = model.get_bias()
assert name is None
def snake_case ( self : str ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def snake_case ( self : str ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = TFTransfoXLModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss." )
def snake_case ( self : Union[str, Any] ):
pass
@require_tf
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@unittest.skip("Skip test until #12651 is resolved." )
@slow
def snake_case ( self : Optional[Any] ):
lowercase__ : Any = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103" )
# fmt: off
lowercase__ : int = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase__ : str = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase__ : Dict = model.generate(SCREAMING_SNAKE_CASE , max_length=200 , do_sample=SCREAMING_SNAKE_CASE )
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE )
| 81 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 81 | 1 |
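The slow DiT tests above reduce to the following usage pattern; this sketch only restates them with plain names (the checkpoint and class labels are taken from the tests themselves):

import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
pipe.to("cuda")

# get_label_ids maps ImageNet class names to the integer labels the model expects.
class_ids = pipe.get_label_ids(["vase", "umbrella"])
generator = torch.manual_seed(0)
images = pipe(class_ids, generator=generator, num_inference_steps=40, output_type="np").images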
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """xlm"""
lowercase_ = {
"""hidden_size""": """emb_dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
"""n_words""": """vocab_size""", # For backward compatibility
}
def __init__( self : str , SCREAMING_SNAKE_CASE : Optional[int]=30_145 , SCREAMING_SNAKE_CASE : Union[str, Any]=2_048 , SCREAMING_SNAKE_CASE : List[Any]=12 , SCREAMING_SNAKE_CASE : Optional[Any]=16 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : str=0.1 , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : List[str]=False , SCREAMING_SNAKE_CASE : List[Any]=1 , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Any=512 , SCREAMING_SNAKE_CASE : Tuple=2_048**-0.5 , SCREAMING_SNAKE_CASE : Dict=1E-1_2 , SCREAMING_SNAKE_CASE : Any=0.02 , SCREAMING_SNAKE_CASE : List[str]=0 , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : Optional[Any]=5 , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : str="first" , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : Tuple=0.1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Dict=0 , SCREAMING_SNAKE_CASE : Tuple=0 , SCREAMING_SNAKE_CASE : int=2 , SCREAMING_SNAKE_CASE : Dict=0 , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : Tuple = vocab_size
lowercase__ : Optional[Any] = emb_dim
lowercase__ : int = n_layers
lowercase__ : Tuple = n_heads
lowercase__ : Optional[int] = dropout
lowercase__ : int = attention_dropout
lowercase__ : Optional[int] = gelu_activation
lowercase__ : Optional[int] = sinusoidal_embeddings
lowercase__ : str = causal
lowercase__ : Optional[int] = asm
lowercase__ : Optional[int] = n_langs
lowercase__ : List[str] = use_lang_emb
lowercase__ : List[str] = layer_norm_eps
lowercase__ : Optional[Any] = bos_index
lowercase__ : int = eos_index
lowercase__ : Dict = pad_index
lowercase__ : Tuple = unk_index
lowercase__ : List[str] = mask_index
lowercase__ : Dict = is_encoder
lowercase__ : Any = max_position_embeddings
lowercase__ : Optional[Any] = embed_init_std
lowercase__ : Any = init_std
lowercase__ : List[str] = summary_type
lowercase__ : Union[str, Any] = summary_use_proj
lowercase__ : Any = summary_activation
lowercase__ : List[Any] = summary_proj_to_labels
lowercase__ : Union[str, Any] = summary_first_dropout
lowercase__ : Tuple = start_n_top
lowercase__ : List[Any] = end_n_top
lowercase__ : List[str] = mask_token_id
lowercase__ : List[str] = lang_id
if "n_words" in kwargs:
lowercase__ : Union[str, Any] = kwargs["n_words"]
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , bos_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@property
def snake_case ( self : Any ):
if self.task == "multiple-choice":
lowercase__ : Any = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 81 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = (CMStochasticIterativeScheduler,)
lowercase_ = 1_0
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Optional[int] ):
lowercase__ : Tuple = 10
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Any = scheduler.timesteps[0]
lowercase__ : Optional[int] = scheduler.timesteps[1]
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = self.dummy_model()
lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Dict = pred_prev_sample
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [106, 0]
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Union[str, Any] = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
| 81 | 1 |
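A de-obfuscated sketch of the sampling loop the CMStochasticIterativeScheduler tests above exercise; `model` is a stand-in for any denoiser with the call signature used in the tests, not a real object:

import torch
from diffusers import CMStochasticIterativeScheduler

scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
scheduler.set_timesteps(timesteps=[106, 0])  # custom descending schedule, as in the test

generator = torch.manual_seed(0)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    scaled = scheduler.scale_model_input(sample, t)  # 1. scale model input
    model_output = model(scaled, t)                  # 2. predict noise residual (hypothetical model)
    sample = scheduler.step(model_output, t, sample, generator=generator).prev_sample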
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ):
warnings.warn(
"The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use ImageGPTImageProcessor instead." , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 81 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
| 81 | 1 |
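For reference, the "epsilon" branch of the Flax DDPM step above recovers the predicted clean sample from the noise prediction via formula (15) of https://arxiv.org/pdf/2006.11239.pdf; a minimal restatement:

import jax.numpy as jnp

def predict_x0_from_eps(sample: jnp.ndarray, eps: jnp.ndarray, alpha_prod_t: jnp.ndarray) -> jnp.ndarray:
    # x_0 = (x_t - sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_bar_t)
    beta_prod_t = 1.0 - alpha_prod_t
    return (sample - beta_prod_t**0.5 * eps) / alpha_prod_t**0.5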
import math
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = len(lowerCamelCase__ )
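    # Jump ahead in blocks of ~floor(sqrt(n)); this block size minimizes worst-case comparisons.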
lowercase__ : List[str] = int(math.floor(math.sqrt(lowerCamelCase__ ) ) )
lowercase__ : Tuple = 0
while arr[min(lowerCamelCase__ , lowerCamelCase__ ) - 1] < x:
lowercase__ : List[str] = step
step += int(math.floor(math.sqrt(lowerCamelCase__ ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowercase__ : int = prev + 1
if prev == min(lowerCamelCase__ , lowerCamelCase__ ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
lowerCAmelCase__ = int(input('''Enter the number to be searched:\n'''))
lowerCAmelCase__ = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f'''Number {x} is at index {res}''')
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Union[str, Any] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
lowercase__ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
| 81 | 1 |
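A readable restatement of the jump-search implementation at the top of this row, with descriptive names; the behavior is unchanged:

import math

def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump block by block until we pass the block that could contain x.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan inside the identified block.
    while arr[prev] < x:
        prev += 1
        if prev == min(step, n):
            return -1
    return prev if arr[prev] == x else -1

assert jump_search([0, 1, 3, 5, 8, 13], 5) == 3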
import string
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
for key in range(len(string.ascii_uppercase ) ):
lowercase__ : List[Any] = ""
for symbol in message:
if symbol in string.ascii_uppercase:
lowercase__ : List[Any] = string.ascii_uppercase.find(lowerCamelCase__ )
lowercase__ : List[str] = num - key
if num < 0:
lowercase__ : Dict = num + len(string.ascii_uppercase )
lowercase__ : int = translated + string.ascii_uppercase[num]
else:
lowercase__ : Union[str, Any] = translated + symbol
print(F"""Decryption using Key #{key}: {translated}""" )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : List[Any] = input("Encrypted message: " )
lowercase__ : Dict = message.upper()
decrypt(lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 81 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
lowercase__ : str = True if "large" in model_name or "huge" in model_name else False
lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : int = [3, 3, 3, 3]
lowercase__ : Tuple = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : Optional[Any] = [4, 4, 4, 4]
lowercase__ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
else:
lowercase__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[Any] = 96
elif "small" in model_name:
lowercase__ : List[str] = 96
elif "base" in model_name:
lowercase__ : str = 128
elif "large" in model_name:
lowercase__ : Any = 192
elif "xlarge" in model_name:
lowercase__ : str = 256
elif "huge" in model_name:
lowercase__ : List[str] = 352
# set label information
lowercase__ : Tuple = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__ : List[Any] = "imagenet-22k-id2label.json"
else:
lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : str = FocalNetConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
return config
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
# fmt: off
lowercase__ : List[Any] = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
lowercase__ : Union[str, Any] = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase__ )
lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
lowercase__ : List[str] = val
lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase__ )
# verify conversion
lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int = BitImageProcessor(
do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
lowercase__ : Any = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
lowercase__ : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 1 |
from math import sqrt
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
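# (every integer is 6k + r with r in 0..5; r = 0, 2, 4 is even and r = 3 is divisible by 3,
# so only candidates of the form 6k + 1 and 6k + 5 remain, hence the step of 6 below)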
for i in range(5 , int(sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __lowerCamelCase ( lowerCamelCase__ = 10_001 ):
"""simple docstring"""
lowercase__ : Optional[Any] = 0
lowercase__ : int = 1
while count != nth and number < 3:
number += 1
if is_prime(lowerCamelCase__ ):
count += 1
while count != nth:
number += 2
if is_prime(lowerCamelCase__ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
| 81 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """informer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ):
# time series specific configuration
lowercase__ : Any = prediction_length
lowercase__ : List[str] = context_length or prediction_length
lowercase__ : Tuple = distribution_output
lowercase__ : Union[str, Any] = loss
lowercase__ : Union[str, Any] = input_size
lowercase__ : List[str] = num_time_features
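# default lags: condition on the values at the previous 7 time steps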
lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase__ : List[str] = scaling
lowercase__ : str = num_dynamic_real_features
lowercase__ : Tuple = num_static_real_features
lowercase__ : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Dict = cardinality
else:
lowercase__ : Dict = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Union[str, Any] = embedding_dimension
else:
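# heuristic default: the embedding size grows with the category's cardinality but is capped at 50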
lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ : Dict = num_parallel_samples
# Transformer architecture configuration
lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ : Optional[Any] = d_model
lowercase__ : int = encoder_attention_heads
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : Tuple = decoder_layers
lowercase__ : Union[str, Any] = dropout
lowercase__ : List[Any] = attention_dropout
lowercase__ : str = activation_dropout
lowercase__ : int = encoder_layerdrop
lowercase__ : Union[str, Any] = decoder_layerdrop
lowercase__ : Tuple = activation_function
lowercase__ : str = init_std
lowercase__ : Tuple = use_cache
# Informer
lowercase__ : Union[str, Any] = attention_type
lowercase__ : Union[str, Any] = sampling_factor
lowercase__ : Tuple = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 81 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "num_attention_heads" ) )
class snake_case__:
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any]=13 , SCREAMING_SNAKE_CASE : int=64 , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : int=16 , SCREAMING_SNAKE_CASE : Optional[Any]=[128, 256, 384] , SCREAMING_SNAKE_CASE : Optional[int]=[4, 6, 8] , SCREAMING_SNAKE_CASE : Any=[2, 3, 4] , SCREAMING_SNAKE_CASE : Any=[16, 16, 16] , SCREAMING_SNAKE_CASE : Optional[Any]=0 , SCREAMING_SNAKE_CASE : List[str]=[2, 2, 2] , SCREAMING_SNAKE_CASE : Dict=[2, 2, 2] , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : int=2 , ):
lowercase__ : Optional[Any] = parent
lowercase__ : Dict = batch_size
lowercase__ : str = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Optional[Any] = kernel_size
lowercase__ : List[Any] = stride
lowercase__ : Tuple = padding
lowercase__ : int = hidden_sizes
lowercase__ : Dict = num_attention_heads
lowercase__ : Union[str, Any] = depths
lowercase__ : Dict = key_dim
lowercase__ : int = drop_path_rate
lowercase__ : Any = patch_size
lowercase__ : Optional[Any] = attention_ratio
lowercase__ : List[str] = mlp_ratio
lowercase__ : Union[str, Any] = initializer_range
lowercase__ : Dict = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowercase__ : int = is_training
lowercase__ : Any = use_labels
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
def snake_case ( self : Optional[int] ):
lowercase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : int = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : int ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Tuple = LevitModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (self.image_size, self.image_size)
lowercase__ , lowercase__ : str = image_size[0], image_size[1]
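# the patch embedding applies 4 convolutions, so the expected spatial size is obtained by applying
# the standard conv output formula floor((n + 2 * padding - kernel_size) / stride) + 1 four times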
for _ in range(4 ):
lowercase__ : Optional[Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase__ : Optional[int] = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Dict = self.num_labels
lowercase__ : Tuple = LevitForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : int = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase_ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : int ):
lowercase__ : Union[str, Any] = LevitModelTester(self )
lowercase__ : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : str ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def snake_case ( self : int ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Dict = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Tuple = [*signature.parameters.keys()]
lowercase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = outputs.hidden_states
lowercase__ : str = len(self.model_tester.depths ) + 1
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = (self.model_tester.image_size, self.model_tester.image_size)
lowercase__ , lowercase__ : Any = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ : int = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase__ : List[Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self : Any ):
pass
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple=False ):
lowercase__ : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def snake_case ( self : int ):
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Any = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(SCREAMING_SNAKE_CASE )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : List[str] ):
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ : Dict = False
lowercase__ : int = True
for model_class in self.all_model_classes:
if model_class in get_values(SCREAMING_SNAKE_CASE ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : List[str] ):
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(SCREAMING_SNAKE_CASE ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type["title"]}""" ):
lowercase__ : int = problem_type["title"]
lowercase__ : Optional[int] = problem_type["num_labels"]
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if problem_type["num_labels"] > 1:
lowercase__ : Dict = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
lowercase__ : str = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE ) as warning_list:
lowercase__ : List[str] = model(**SCREAMING_SNAKE_CASE ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def snake_case ( self : Optional[Any] ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Optional[Any] = LevitModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[str] ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def snake_case ( self : Dict ):
lowercase__ : Tuple = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.default_image_processor
lowercase__ : Any = prepare_img()
lowercase__ : Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Dict = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Dict = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
else:
lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
lowercase__ : str = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowercase__ : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowercase__ : Tuple = prophet
lowercase__ : Tuple = prophet_old
else:
lowercase__ : Tuple = prophet.prophetnet
lowercase__ : List[str] = prophet_old.model
lowercase__ : int = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : int = mapping[attribute]
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowercase__ : Dict = attribute
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowercase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowercase__ : str = True
break
elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
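# the old attention fuses Q, K and V into one in_proj matrix of shape (3 * embed_dim, embed_dim):
# rows [0:e] hold the query projection, [e:2e] the key projection and [2e:3e] the value projection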
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase__ : str = model[int(lowerCamelCase__ )]
lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
else:
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_attribute == "":
lowercase__ : str = old_model
else:
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 81 | 1 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
lowerCAmelCase__ = 4
lowerCAmelCase__ = 3
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
pass
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
for shard in shards:
for i in range(lowerCamelCase__ ):
yield {"i": i, "shard": shard}
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = int(os.environ["RANK"] )
lowercase__ : Optional[Any] = int(os.environ["WORLD_SIZE"] )
lowercase__ : Tuple = ArgumentParser()
parser.add_argument("--streaming" , type=lowerCamelCase__ )
parser.add_argument("--local_rank" , type=lowerCamelCase__ )
parser.add_argument("--num_workers" , type=lowerCamelCase__ , default=0 )
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Optional[int] = args.streaming
lowercase__ : Union[str, Any] = args.num_workers
lowercase__ : Optional[Any] = {"shards": [F"""shard_{shard_idx}""" for shard_idx in range(lowerCamelCase__ )]}
lowercase__ : Optional[int] = IterableDataset.from_generator(lowerCamelCase__ , gen_kwargs=lowerCamelCase__ )
if not streaming:
lowercase__ : Any = Dataset.from_list(list(lowerCamelCase__ ) )
lowercase__ : List[Any] = split_dataset_by_node(lowerCamelCase__ , rank=lowerCamelCase__ , world_size=lowerCamelCase__ )
lowercase__ : Optional[Any] = torch.utils.data.DataLoader(lowerCamelCase__ , num_workers=lowerCamelCase__ )
lowercase__ : List[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
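# each rank is expected to receive floor(full_size / world_size) examples, with the first
# (full_size % world_size) ranks getting one extra example each, as computed on the next two lines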
lowercase__ : str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
lowercase__ : Optional[int] = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )
if __name__ == "__main__":
main()
| 81 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def snake_case ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = "lower newer"
lowercase__ : Optional[Any] = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokens + [tokenizer.unk_token]
lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
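# expected ids index into the toy vocab built in setUp; the trailing 19 is the <unk> token id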
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
# Testing tokenization
lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level BPE
# and to get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : Dict = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Any ):
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowerCAmelCase__ = [
'''openmmlab/upernet-convnext-tiny''',
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowerCAmelCase__ = '''UperNetConfig'''
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] , SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int], str] = 0 , SCREAMING_SNAKE_CASE : bool = False , SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] = 1 , ):
super().__init__()
lowercase__ : str = nn.Convad(
in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , bias=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE , )
lowercase__ : int = nn.BatchNormad(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = nn.ReLU()
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : torch.Tensor ):
lowercase__ : str = self.conv(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.batch_norm(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.activation(SCREAMING_SNAKE_CASE )
return output
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
super().__init__()
lowercase__ : List[str] = [
nn.AdaptiveAvgPoolad(SCREAMING_SNAKE_CASE ),
UperNetConvModule(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : torch.Tensor ):
lowercase__ : List[Any] = input
for layer in self.layers:
lowercase__ : Optional[int] = layer(SCREAMING_SNAKE_CASE )
return hidden_state
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple[int, ...] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool ):
super().__init__()
lowercase__ : Dict = pool_scales
lowercase__ : Dict = align_corners
lowercase__ : Tuple = in_channels
lowercase__ : Union[str, Any] = channels
lowercase__ : str = []
for i, pool_scale in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Union[str, Any] = UperNetPyramidPoolingBlock(pool_scale=SCREAMING_SNAKE_CASE , in_channels=SCREAMING_SNAKE_CASE , channels=SCREAMING_SNAKE_CASE )
self.blocks.append(SCREAMING_SNAKE_CASE )
self.add_module(str(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : torch.Tensor ):
lowercase__ : List[Any] = []
for ppm in self.blocks:
lowercase__ : Optional[Any] = ppm(SCREAMING_SNAKE_CASE )
lowercase__ : Any = nn.functional.interpolate(
SCREAMING_SNAKE_CASE , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
ppm_outs.append(SCREAMING_SNAKE_CASE )
return ppm_outs
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] ):
super().__init__()
lowercase__ : Tuple = config
lowercase__ : Optional[Any] = config.pool_scales # e.g. (1, 2, 3, 6)
lowercase__ : int = in_channels
lowercase__ : Any = config.hidden_size
lowercase__ : Dict = False
lowercase__ : List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
lowercase__ : str = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
lowercase__ : Optional[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
lowercase__ : Tuple = nn.ModuleList()
lowercase__ : Tuple = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
lowercase__ : Any = UperNetConvModule(SCREAMING_SNAKE_CASE , self.channels , kernel_size=1 )
lowercase__ : List[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(SCREAMING_SNAKE_CASE )
self.fpn_convs.append(SCREAMING_SNAKE_CASE )
lowercase__ : int = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def snake_case ( self : Optional[Any] ):
self.apply(self._init_weights )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
if isinstance(SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Optional[Any] = inputs[-1]
lowercase__ : Union[str, Any] = [x]
psp_outs.extend(self.psp_modules(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[Any] = torch.cat(SCREAMING_SNAKE_CASE , dim=1 )
lowercase__ : Union[str, Any] = self.bottleneck(SCREAMING_SNAKE_CASE )
return output
def snake_case ( self : str , SCREAMING_SNAKE_CASE : torch.Tensor ):
# build laterals
lowercase__ : Optional[Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(SCREAMING_SNAKE_CASE ) )
# build top-down path
lowercase__ : List[str] = len(SCREAMING_SNAKE_CASE )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase__ : Optional[Any] = laterals[i - 1].shape[2:]
lowercase__ : Tuple = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=SCREAMING_SNAKE_CASE , mode="bilinear" , align_corners=self.align_corners )
# build outputs
lowercase__ : Optional[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
lowercase__ : str = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
lowercase__ : List[str] = torch.cat(SCREAMING_SNAKE_CASE , dim=1 )
lowercase__ : Optional[Any] = self.fpn_bottleneck(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = self.classifier(SCREAMING_SNAKE_CASE )
return output
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 3 , SCREAMING_SNAKE_CASE : Union[int, Tuple[int, int]] = 1 ):
super().__init__()
lowercase__ : int = config
lowercase__ : Union[str, Any] = config.auxiliary_in_channels
lowercase__ : Dict = config.auxiliary_channels
lowercase__ : Tuple = config.auxiliary_num_convs
lowercase__ : Union[str, Any] = config.auxiliary_concat_input
lowercase__ : List[str] = in_index
lowercase__ : Union[str, Any] = (kernel_size // 2) * dilation
lowercase__ : str = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , dilation=SCREAMING_SNAKE_CASE ) )
if self.num_convs == 0:
lowercase__ : str = nn.Identity()
else:
lowercase__ : Union[str, Any] = nn.Sequential(*SCREAMING_SNAKE_CASE )
if self.concat_input:
lowercase__ : List[Any] = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE , padding=kernel_size // 2 )
lowercase__ : List[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def snake_case ( self : Optional[Any] ):
self.apply(self._init_weights )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Optional[int] ):
if isinstance(SCREAMING_SNAKE_CASE , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : torch.Tensor ):
# just take the relevant feature maps
lowercase__ : Union[str, Any] = encoder_hidden_states[self.in_index]
lowercase__ : List[str] = self.convs(SCREAMING_SNAKE_CASE )
if self.concat_input:
lowercase__ : int = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
lowercase__ : Optional[Any] = self.classifier(SCREAMING_SNAKE_CASE )
return output
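The `(kernel_size // 2) * dilation` padding above is what keeps the auxiliary head's convolutions size-preserving for odd kernels. A quick numeric check of that identity (toy channel counts):
import torch
import torch.nn as nn

for kernel_size, dilation in [(3, 1), (3, 2), (5, 1)]:
    padding = (kernel_size // 2) * dilation
    conv = nn.Conv2d(4, 4, kernel_size, padding=padding, dilation=dilation)
    out = conv(torch.randn(1, 4, 17, 17))
    assert out.shape[2:] == (17, 17), (kernel_size, dilation)
print("same-size padding holds for all tested (kernel_size, dilation) pairs")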
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = UperNetConfig
lowercase_ = """pixel_values"""
lowercase_ = True
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : Optional[int] ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def snake_case ( self : Optional[int] ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int]=False ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Any = value
lowerCAmelCase__ = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
lowerCAmelCase__ = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , _UpperCamelCase , )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : List[str] ):
super().__init__(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
lowercase__ : List[str] = UperNetHead(SCREAMING_SNAKE_CASE , in_channels=self.backbone.channels )
lowercase__ : List[str] = UperNetFCNHead(SCREAMING_SNAKE_CASE ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
@replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[torch.Tensor] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , ):
lowercase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase__ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase__ : str = output_attentions if output_attentions is not None else self.config.output_attentions
lowercase__ : Any = self.backbone.forward_with_filtered_kwargs(
SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = outputs.feature_maps
lowercase__ : Optional[int] = self.decode_head(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = nn.functional.interpolate(SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=SCREAMING_SNAKE_CASE )
lowercase__ : str = None
if self.auxiliary_head is not None:
lowercase__ : Optional[int] = self.auxiliary_head(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = nn.functional.interpolate(
SCREAMING_SNAKE_CASE , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=SCREAMING_SNAKE_CASE )
lowercase__ : int = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError("The number of labels should be greater than one" )
else:
# compute weighted loss
lowercase__ : str = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
lowercase__ : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = loss_fct(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : str = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
lowercase__ : List[Any] = (logits,) + outputs[1:]
else:
lowercase__ : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=SCREAMING_SNAKE_CASE , logits=SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
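End to end, the model above is used like any other HF segmentation head. A minimal inference sketch; the checkpoint name is an assumption (any UperNet checkpoint on the Hub should behave the same way):
import torch
from PIL import Image
from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

checkpoint = "openmmlab/upernet-convnext-tiny"  # assumed Hub checkpoint
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = UperNetForSemanticSegmentation.from_pretrained(checkpoint)

image = Image.open("scene.jpg")  # any RGB image on disk
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits        # (batch, num_labels, height, width)
segmentation = logits.argmax(dim=1)        # per-pixel class ids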
| 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 1 |
import numpy as np
lowerCAmelCase__ = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class snake_case__:
"""simple docstring"""
def __init__( self : Dict ):
lowercase__ : List[str] = np.array(SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : str ):
lowercase__ , lowercase__ : str = np.where(letter == self.SQUARE )
lowercase__ : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def snake_case ( self : str , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str ):
lowercase__ : int = message.lower()
lowercase__ : Any = message.replace(" " , "" )
lowercase__ : Union[str, Any] = message.replace("j" , "i" )
lowercase__ : Optional[Any] = np.empty((2, len(SCREAMING_SNAKE_CASE )) )
for letter_index in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ : Optional[int] = self.letter_to_numbers(message[letter_index] )
lowercase__ : Union[str, Any] = numbers[0]
lowercase__ : Dict = numbers[1]
lowercase__ : str = first_step.reshape(2 * len(SCREAMING_SNAKE_CASE ) )
lowercase__ : int = ""
for numbers_index in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ : int = int(second_step[numbers_index * 2] )
lowercase__ : Optional[Any] = int(second_step[(numbers_index * 2) + 1] )
lowercase__ : List[str] = self.numbers_to_letter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = encoded_message + letter
return encoded_message
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : str ):
lowercase__ : List[Any] = message.lower()
lowercase__ : List[Any] = message.replace(" " , "" )
lowercase__ : List[Any] = np.empty(2 * len(SCREAMING_SNAKE_CASE ) )
for letter_index in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ : Tuple = self.letter_to_numbers(message[letter_index] )
lowercase__ : Optional[Any] = numbers[0]
lowercase__ : Any = numbers[1]
lowercase__ : Any = first_step.reshape((2, len(SCREAMING_SNAKE_CASE )) )
lowercase__ : List[Any] = ""
for numbers_index in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ : Union[str, Any] = int(second_step[0, numbers_index] )
lowercase__ : List[Any] = int(second_step[1, numbers_index] )
lowercase__ : List[str] = self.numbers_to_letter(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Dict = decoded_message + letter
return decoded_message
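The class above is a Bifid-style cipher over a 5x5 Polybius square: each letter maps to a (row, column) pair, all first coordinates are written before all second coordinates, and letters are read back off consecutive pairs of the mixed sequence. An un-obfuscated sketch of the encode step, independent of the class above (0-based indexing instead of the +1/-1 round trip):
import numpy as np

SQUARE = np.array(
    [
        ["a", "b", "c", "d", "e"],
        ["f", "g", "h", "i", "k"],
        ["l", "m", "n", "o", "p"],
        ["q", "r", "s", "t", "u"],
        ["v", "w", "x", "y", "z"],
    ]
)

def encode(message: str) -> str:
    message = message.lower().replace(" ", "").replace("j", "i")
    rows, cols = [], []
    for ch in message:
        r, c = np.argwhere(SQUARE == ch)[0]  # (row, column) of the letter
        rows.append(r)
        cols.append(c)
    mixed = rows + cols  # all row indices first, then all column indices
    return "".join(SQUARE[mixed[2 * i], mixed[2 * i + 1]] for i in range(len(message)))

print(encode("the quick fox"))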
| 81 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
lowercase__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : str = outputs.last_hidden_state.numpy()
lowercase__ : Optional[Any] = 0
else:
lowercase__ : Optional[Any] = outputs.logits.numpy()
lowercase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy()
lowercase__ : Optional[int] = 0
else:
lowercase__ : str = after_outputs["logits"].numpy()
lowercase__ : Tuple = 0
lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
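The expected encoder sequence length used throughout the tester above comes from the mask ratio: only `ceil((1 - mask_ratio) * (num_patches + 1))` tokens (patches plus the [CLS] token) survive masking. Spelled out with the tester's defaults:
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                      # 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226) = 91
print(num_patches, seq_length)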
| 81 | 1 |
from __future__ import annotations
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[int] = str(lowerCamelCase__ )
return n == n[::-1]
def __lowerCamelCase ( lowerCamelCase__ = 1_000_000 ):
"""simple docstring"""
lowercase__ : Optional[int] = 0
for i in range(1 , lowerCamelCase__ ):
if is_palindrome(lowerCamelCase__ ) and is_palindrome(bin(lowerCamelCase__ ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
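For a quick cross-check, the same Project Euler 36 computation restated with readable names for a small limit (even numbers can never qualify: a binary palindrome cannot end in 0, so only odd candidates are scanned):
def double_base_palindrome_sum(limit: int = 1_000) -> int:
    return sum(
        i
        for i in range(1, limit, 2)
        if str(i) == str(i)[::-1] and bin(i)[2:] == bin(i)[2:][::-1]
    )

# 1 + 3 + 5 + 7 + 9 + 33 + 99 + 313 + 585 + 717
print(double_base_palindrome_sum())  # 1772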
| 81 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """esm"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : Optional[int] = emb_layer_norm_before
lowercase__ : List[str] = token_dropout
lowercase__ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : List[str] = get_default_vocab_list()
else:
lowercase__ : List[Any] = vocab_list
else:
lowercase__ : List[Any] = None
lowercase__ : List[str] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def snake_case ( self : List[str] ):
lowercase__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Optional[int] ):
if self.trunk is None:
lowercase__ : Dict = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE ):
lowercase__ : int = TrunkConfig(**self.trunk )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Any = self.trunk.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 4_8
lowercase_ = 1_0_2_4
lowercase_ = 1_2_8
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 0
lowercase_ = 0
lowercase_ = False
lowercase_ = 4
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Dict ):
if self.structure_module is None:
lowercase__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
lowercase__ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
lowercase__ : List[Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def snake_case ( self : Optional[Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 3_8_4
lowercase_ = 1_2_8
lowercase_ = 1_6
lowercase_ = 1_2_8
lowercase_ = 1_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 1_0
lowercase_ = 1e-8
lowercase_ = 1e5
def snake_case ( self : Dict ):
return asdict(self )
def __lowerCamelCase ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
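The divisibility checks in TrunkConfig above pin down the attention head counts. With the dataclass defaults this works out to 32 sequence heads and 4 pairwise heads:
sequence_state_dim, sequence_head_width = 1_024, 32
pairwise_state_dim, pairwise_head_width = 128, 32

assert sequence_state_dim % sequence_head_width == 0
assert pairwise_state_dim % pairwise_head_width == 0
print(sequence_state_dim // sequence_head_width)  # 32 sequence attention heads
print(pairwise_state_dim // pairwise_head_width)  # 4 pairwise attention heads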
| 81 | 1 |
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Union[str, Any] = len(lowerCamelCase__ )
lowercase__ : Dict = len(lowerCamelCase__ )
lowercase__ : List[Any] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
lowercase__ : Optional[int] = True
for i in range(lowerCamelCase__ ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
lowercase__ : Union[str, Any] = True
if a[i].islower():
lowercase__ : List[Any] = True
return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
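This is the classic abbreviation problem: capitalize some lowercase letters of the first string, delete the remaining lowercase letters, and try to obtain the second. The recurrence, restated with readable names and two worked calls (equivalent to the function above):
def can_abbreviate(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # capitalize (or keep) a[i] to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True      # delete the lowercase a[i]
    return dp[n][m]

print(can_abbreviate("daBcd", "ABC"))  # True: drop d, a->A, keep B, c->C, drop d
print(can_abbreviate("dBcd", "ABC"))   # False: nothing can produce the leading 'A'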
| 81 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """deformable_detr"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : List[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
def snake_case ( self : str ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
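Because of `attribute_map`, the generic config names resolve to the DETR-specific ones, so downstream code can read either. A small demonstration (assuming a transformers install recent enough to ship this config):
from transformers import DeformableDetrConfig

config = DeformableDetrConfig()
assert config.hidden_size == config.d_model                         # mapped name
assert config.num_attention_heads == config.encoder_attention_heads
print(config.hidden_size, config.num_attention_heads)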
| 81 | 1 |
def __lowerCamelCase ( lowerCamelCase__ = 10 ):
"""simple docstring"""
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or n < 0:
raise ValueError("Invalid input" )
lowercase__ : List[str] = 10**n
lowercase__ : Tuple = 28_433 * (pow(2 , 7_830_457 , lowerCamelCase__ )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(1_0) = }''')
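The whole trick here is three-argument pow, which performs modular exponentiation without ever materializing 2**7_830_457 (roughly 2.4 million decimal digits). A small-scale illustration of the equivalence:
# Last four digits of 3**2024, two ways; the naive form would be hopeless at
# the exponent size used above.
fast = pow(3, 2_024, 10**4)   # modular exponentiation, stays small throughout
naive = (3**2_024) % 10**4    # builds the full ~966-digit integer first
assert fast == naive
print(fast)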
| 81 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__ : List[Any] = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
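Worth noting: the pad rule above, `(old // size + 1) * size - old`, always adds at least one full block, so a side that is already a multiple of `size` still grows by `size`. A numeric trace, next to the remainder-based variant that pads only when needed:
size = 8
for old in (250, 256, 257):
    pad_always = (old // size + 1) * size - old  # rule used by the processor above
    pad_min = (size - old % size) % size         # pads only when not already aligned
    print(old, pad_always, pad_min)
# 250 6 6
# 256 8 0
# 257 7 7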
| 81 | 1 |
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = []
lowercase__ : int = 1
while len(lowerCamelCase__ ) < 1e6:
constant.append(str(lowerCamelCase__ ) )
i += 1
lowercase__ : Optional[int] = "".join(lowerCamelCase__ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9_999] )
* int(constant[99_999] )
* int(constant[999_999] )
)
if __name__ == "__main__":
print(solution())
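The string assembled above is the fractional part of the Champernowne constant, 0.123456789101112...; the answer multiplies the digits d_1, d_10, d_100, ..., d_1000000 (1-based, hence the 0-based indices 0, 9, 99, ... in the return). A tiny check of the indexing on the first few digits:
digits = "".join(str(i) for i in range(1, 100))  # "123456789101112..."
assert digits[0] == "1"   # d_1
assert digits[8] == "9"   # d_9
assert digits[9] == "1"   # d_10: the '1' of 10
assert digits[11] == "1"  # d_12: the first '1' of 11
print(digits[:15])        # 123456789101112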
| 81 |
import argparse
import json
from tqdm import tqdm
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , )
lowercase__ : Dict = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
lowercase__ : List[str] = json.load(lowerCamelCase__ )
for dpr_record in tqdm(lowerCamelCase__ ):
lowercase__ : Any = dpr_record["question"]
lowercase__ : str = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(lowerCamelCase__ ) + "\n" )
if __name__ == "__main__":
main()
| 81 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 81 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
lowercase__ : Optional[int] = parser.parse_args()
return args
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
def fn(lowerCamelCase__ ):
return tokenizer(examples["text"] )
return fn
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = []
for i in range(len(tokenized_data["input_ids"] ) ):
lowercase__ : str = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ )
lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ )
lowercase__ : str = example.SerializeToString()
records.append(lowerCamelCase__ )
return records
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit )
lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
lowercase__ : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ : str = tokenize_function(lowerCamelCase__ )
lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowerCamelCase__ ):
# Concatenate all texts.
lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ : List[str] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ : Optional[int] = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 )
lowercase__ : str = 0
lowercase__ : str = 0
for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ):
lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size]
lowercase__ : str = len(dataset_snapshot["input_ids"] )
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ )
with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file:
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Optional[int] = serialized_examples[i]
out_file.write(lowerCamelCase__ )
print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
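The group_texts step is easiest to see on toy data: every column is concatenated across samples, the tail that does not fill a whole block is dropped, and the remainder is cut into max_length chunks. A minimal sketch mirroring the batched map above:
def group_texts_demo(examples, max_length=4):
    concatenated = {k: sum(examples[k], []) for k in examples}
    total_length = (len(concatenated["input_ids"]) // max_length) * max_length
    return {
        k: [t[i : i + max_length] for i in range(0, total_length, max_length)]
        for k, t in concatenated.items()
    }

batch = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9]]}
print(group_texts_demo(batch))
# {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]} -- token 9 falls in the dropped tail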
| 81 | 1 |
def __lowerCamelCase ( ):
"""simple docstring"""
return [
a * b * (1_000 - a - b)
for a in range(1 , 999 )
for b in range(lowerCamelCase__ , 999 )
if (a * a + b * b == (1_000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
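Project Euler 9 has a unique solution, which the nested scan above finds; a direct verification of the triple it returns:
a, b = 200, 375
c = 1_000 - a - b  # 425
assert a * a + b * b == c * c  # 40_000 + 140_625 == 180_625
print(a * b * c)  # 31875000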
| 81 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
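# The two helpers below package the prepared config and inputs into the dict formats used by the shared test mixins, first without and then with labels.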
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
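# Flags consumed by the shared test mixins; all are set to False for this model family.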
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = ConvNextVaModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES ),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
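# The next test repeats the training smoke test with gradient checkpointing enabled to exercise the recompute path.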
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
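# Integration test below: run the released facebook/convnextv2-tiny-1k-224 checkpoint on a COCO fixture image and check a slice of the logits.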
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
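# Entry point for the accelerate CLI: each sub-command registers its own argparse sub-parser on the shared parser.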
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : str = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=lowerCamelCase__ )
lowercase__ : Any = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=lowerCamelCase__ )
env_command_parser(subparsers=lowerCamelCase__ )
launch_command_parser(subparsers=lowerCamelCase__ )
tpu_command_parser(subparsers=lowerCamelCase__ )
test_command_parser(subparsers=lowerCamelCase__ )
# Let's go
lowercase__ : Union[str, Any] = parser.parse_args()
if not hasattr(lowerCamelCase__ , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(lowerCamelCase__ )
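# Example invocation (hypothetical script name, for illustration only):
#   accelerate launch --num_processes 2 train.py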
if __name__ == "__main__":
main()
| 81 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
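# The next assignments correspond, in the upstream test, to the config's vocab size, eos/decoder-start token ids, and max generation length.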
lowercase__ : str = bertabert.config.encoder.vocab_size
lowercase__ : List[str] = tokenizer.sep_token_id
lowercase__ : Optional[Any] = tokenizer.cls_token_id
lowercase__ : int = 128
lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowercase__ : Tuple = train_dataset.select(range(32 ) )
lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
lowercase__ : int = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
lowercase__ : Tuple = inputs.input_ids
lowercase__ : Optional[int] = inputs.attention_mask
lowercase__ : int = outputs.input_ids
lowercase__ : Dict = outputs.input_ids.copy()
lowercase__ : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
lowercase__ : List[Any] = outputs.attention_mask
assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
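# Exact-match accuracy over decoded strings; a cheap stand-in for a real summarization metric.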
def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = pred.label_ids
lowercase__ : Dict = pred.predictions
# all unnecessary tokens are removed
lowercase__ : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : str = tokenizer.batch_decode(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sum([int(pred_str[i] == label_str[i] ) for i in range(len(SCREAMING_SNAKE_CASE ) )] ) / len(SCREAMING_SNAKE_CASE )
return {"accuracy": accuracy}
# map train dataset
lowercase__ : List[str] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowercase__ : Any = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
lowercase__ : int = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ : str = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
| 81 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
lowerCAmelCase__ = False
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=32 ):
set_seed(0 )
lowercase__ : Optional[Any] = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE , in_channels=3 , out_channels=3 )
lowercase__ : Dict = torch.optim.SGD(model.parameters() , lr=0.0_001 )
return model, optimizer
@slow
def snake_case ( self : Any ):
lowercase__ : List[str] = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowercase__ : Optional[int] = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=SCREAMING_SNAKE_CASE , )
lowercase__ : str = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule="linear" , clip_sample=SCREAMING_SNAKE_CASE , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowercase__ : int = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(SCREAMING_SNAKE_CASE ) for _ in range(4 )]
lowercase__ : List[str] = [torch.randn((4, 3, 32, 32) ).to(SCREAMING_SNAKE_CASE ) for _ in range(4 )]
lowercase__ : int = [torch.randint(0 , 1_000 , (4,) ).long().to(SCREAMING_SNAKE_CASE ) for _ in range(4 )]
# train with a DDPM scheduler
lowercase__ , lowercase__ : Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(SCREAMING_SNAKE_CASE )
for i in range(4 ):
optimizer.zero_grad()
lowercase__ : Dict = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE , timesteps[i] ).sample
lowercase__ : str = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowercase__ , lowercase__ : Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(SCREAMING_SNAKE_CASE )
for i in range(4 ):
optimizer.zero_grad()
lowercase__ : List[str] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , timesteps[i] ).sample
lowercase__ : str = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
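# With identical seeds, data and schedule lengths the two runs should agree; in the original test the tensors compared below are the noised images and noise predictions from the DDPM and DDIM paths.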
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-5 ) )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-5 ) )
| 81 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : List[str] = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowercase__ : Tuple = 192
lowercase__ : List[Any] = 768
lowercase__ : Tuple = 12
lowercase__ : List[str] = 3
lowercase__ : List[Any] = [800, 1_333]
lowercase__ : Union[str, Any] = False
elif yolos_name == "yolos_s_dWr":
lowercase__ : str = 330
lowercase__ : List[Any] = 14
lowercase__ : Tuple = 6
lowercase__ : Optional[int] = 1_320
elif "yolos_s" in yolos_name:
lowercase__ : Dict = 384
lowercase__ : str = 1_536
lowercase__ : List[Any] = 12
lowercase__ : List[Any] = 6
elif "yolos_b" in yolos_name:
lowercase__ : int = [800, 1_344]
lowercase__ : Tuple = 91
lowercase__ : Optional[int] = "huggingface/label-files"
lowercase__ : Optional[int] = "coco-detection-id2label.json"
lowercase__ : Any = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : List[Any] = idalabel
lowercase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowercase__ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase__ : Union[str, Any] = in_proj_weight[: config.hidden_size, :]
lowercase__ : Union[str, Any] = in_proj_bias[: config.hidden_size]
lowercase__ : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase__ : str = in_proj_weight[-config.hidden_size :, :]
lowercase__ : Tuple = in_proj_bias[-config.hidden_size :]
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "backbone" in name:
lowercase__ : Union[str, Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
lowercase__ : List[str] = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
lowercase__ : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
lowercase__ : List[Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
lowercase__ : Dict = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
lowercase__ : str = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
lowercase__ : int = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
lowercase__ : Optional[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowercase__ : Optional[int] = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowercase__ : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase__ : int = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowercase__ : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase__ : Union[str, Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
lowercase__ : int = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
lowercase__ : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
lowercase__ : Optional[Any] = name.replace("vit.norm" , "vit.layernorm" )
return name
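# Copy the checkpoint into a fresh dict, renaming keys and splitting fused qkv tensors along the way.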
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
lowercase__ : List[Any] = orig_state_dict.pop(lowerCamelCase__ )
if "qkv" in key:
lowercase__ : Dict = key.split("." )
lowercase__ : List[Any] = int(key_split[2] )
lowercase__ : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowercase__ : str = val[:dim, :]
lowercase__ : int = val[
dim : dim * 2, :
]
lowercase__ : str = val[-dim:, :]
else:
lowercase__ : Tuple = val[:dim]
lowercase__ : Any = val[dim : dim * 2]
lowercase__ : Optional[Any] = val[-dim:]
else:
lowercase__ : Optional[Any] = val
return orig_state_dict
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : List[str] = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
return im
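# Main conversion: load the original checkpoint, remap its keys, verify logits and boxes on a sample image, then save (and optionally push to the Hub).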
@torch.no_grad()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False ):
"""simple docstring"""
lowercase__ : List[Any] = get_yolos_config(lowerCamelCase__ )
# load original state_dict
lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location="cpu" )["model"]
# load 🤗 model
lowercase__ : Dict = YolosForObjectDetection(lowerCamelCase__ )
model.eval()
lowercase__ : int = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
# Check outputs on an image, prepared by YolosImageProcessor
lowercase__ : Dict = 800 if yolos_name != "yolos_ti" else 512
lowercase__ : Optional[Any] = YolosImageProcessor(format="coco_detection" , size=lowerCamelCase__ )
lowercase__ : int = image_processor(images=prepare_img() , return_tensors="pt" )
lowercase__ : int = model(**lowerCamelCase__ )
lowercase__ , lowercase__ : int = outputs.logits, outputs.pred_boxes
lowercase__ , lowercase__ : int = None, None
if yolos_name == "yolos_ti":
lowercase__ : Optional[int] = torch.tensor(
[[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] )
lowercase__ : Dict = torch.tensor(
[[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] )
elif yolos_name == "yolos_s_200_pre":
lowercase__ : Any = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] )
lowercase__ : List[str] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] )
elif yolos_name == "yolos_s_300_pre":
lowercase__ : Dict = torch.tensor(
[[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] )
lowercase__ : Tuple = torch.tensor(
[[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] )
elif yolos_name == "yolos_s_dWr":
lowercase__ : Optional[Any] = torch.tensor(
[[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] )
lowercase__ : int = torch.tensor(
[[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] )
elif yolos_name == "yolos_base":
lowercase__ : List[str] = torch.tensor(
[[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] )
lowercase__ : List[str] = torch.tensor(
[[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
Path(lowerCamelCase__ ).mkdir(exist_ok=lowerCamelCase__ )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
lowercase__ : Tuple = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
lowercase__ : Optional[int] = model_mapping[yolos_name]
image_processor.push_to_hub(lowerCamelCase__ , organization="hustvl" )
model.push_to_hub(lowerCamelCase__ , organization="hustvl" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class snake_case__(pl.LightningModule ):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : List[Any] ):
super().__init__()
lowercase__ : int = model
lowercase__ : Optional[int] = 2
lowercase__ : str = nn.Linear(self.model.config.hidden_size , self.num_labels )
def snake_case ( self : Dict ):
pass
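# The conversion below loads the Lightning checkpoint and copies its Longformer encoder and QA-head weights into a standalone LongformerForQuestionAnswering.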
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : str = LongformerModel.from_pretrained(lowerCamelCase__ )
lowercase__ : int = LightningModel(lowerCamelCase__ )
lowercase__ : Dict = torch.load(lowerCamelCase__ , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
lowercase__ : Tuple = LongformerForQuestionAnswering.from_pretrained(lowerCamelCase__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(lowerCamelCase__ )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 81 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCAmelCase__ = '''
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
>>> pipe.to("cuda")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save("cat.png")
```
'''
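# Compute the latent height/width for a requested image size: ceil-divide by scale_factor**2, then multiply back by scale_factor.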
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=8 ):
"""simple docstring"""
lowercase__ : str = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase__ : int = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : DDPMScheduler , SCREAMING_SNAKE_CASE : VQModel , ):
super().__init__()
self.register_modules(
unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , movq=SCREAMING_SNAKE_CASE , )
lowercase__ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] ):
if latents is None:
lowercase__ : Optional[int] = randn_tensor(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
lowercase__ : Dict = latents.to(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = latents * scheduler.init_noise_sigma
return latents
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Any = torch.device(f"""cuda:{gpu_id}""" )
lowercase__ : Union[str, Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : int=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ : List[str] = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ : Tuple = cpu_offload_with_hook(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , prev_module_hook=SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
lowercase__ : Any = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : str ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(SCREAMING_SNAKE_CASE )
def __call__( self : Optional[int] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, List[torch.FloatTensor]] , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 4.0 , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Optional[int] = self._execution_device
lowercase__ : Union[str, Any] = guidance_scale > 1.0
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
lowercase__ : Optional[int] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = torch.cat(SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
lowercase__ : Union[str, Any] = image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
lowercase__ : int = negative_image_embeds.repeat_interleave(SCREAMING_SNAKE_CASE , dim=0 )
lowercase__ : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE , device=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.scheduler.timesteps
lowercase__ : Dict = self.unet.config.in_channels
lowercase__ , lowercase__ : List[str] = downscale_height_and_width(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
lowercase__ : Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ : List[str] = {"image_embeds": image_embeds}
lowercase__ : Union[str, Any] = self.unet(
sample=SCREAMING_SNAKE_CASE , timestep=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , added_cond_kwargs=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ : Optional[int] = noise_pred.chunk(2 )
lowercase__ , lowercase__ : int = variance_pred.chunk(2 )
lowercase__ : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ : Dict = torch.cat([noise_pred, variance_pred_text] , dim=1 )
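# Schedulers without a learned variance expect only the epsilon half of the prediction.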
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ : int = self.scheduler.step(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , )[0]
# post-processing
lowercase__ : Any = self.movq.decode(SCREAMING_SNAKE_CASE , force_not_quantize=SCREAMING_SNAKE_CASE )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
lowercase__ : Dict = image * 0.5 + 0.5
lowercase__ : Optional[int] = image.clamp(0 , 1 )
lowercase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ : int = self.numpy_to_pil(SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE )
| 81 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = tempfile.mkdtemp()
# fmt: off
lowercase__ : Any = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowercase__ : Dict = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : Tuple = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowercase__ : Tuple = {"unk_token": "<unk>"}
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self : Any ):
lowercase__ : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowercase__ : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
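# Round-trip the slow and fast processors through save_pretrained/from_pretrained and check that tokenizer vocab and image-processor config survive.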
def snake_case ( self : int ):
lowercase__ : Optional[int] = self.get_tokenizer()
lowercase__ : List[Any] = self.get_rust_tokenizer()
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
lowercase__ : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
lowercase__ : Tuple = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase__ : Dict = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase__ : int = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
lowercase__ : Union[str, Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : int = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.prepare_image_inputs()
lowercase__ : List[Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : Optional[int] = processor(images=SCREAMING_SNAKE_CASE , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case ( self : str ):
lowercase__ : Tuple = self.get_image_processor()
lowercase__ : Any = self.get_tokenizer()
lowercase__ : Any = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
lowercase__ : Dict = processor(text=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.get_image_processor()
lowercase__ : Tuple = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "lower newer"
lowercase__ : str = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(SCREAMING_SNAKE_CASE ):
processor()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = self.get_image_processor()
lowercase__ : Optional[Any] = self.get_tokenizer()
lowercase__ : Tuple = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase__ : Any = processor.batch_decode(SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = self.get_image_processor()
lowercase__ : List[str] = self.get_tokenizer()
lowercase__ : Union[str, Any] = CLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE , image_processor=SCREAMING_SNAKE_CASE )
lowercase__ : Any = "lower newer"
lowercase__ : Union[str, Any] = self.prepare_image_inputs()
lowercase__ : int = processor(text=SCREAMING_SNAKE_CASE , images=SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 81 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase__ = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''PoolFormerFeatureExtractor''']
lowerCAmelCase__ = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 81 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
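# Exercises TextStreamer (prints to stdout) and TextIteratorStreamer (consumed from a background thread) against greedy generation.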
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Dict = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : str = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : int = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : str = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : str = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = -1
lowercase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer.decode(greedy_ids[0] )
lowercase__ : Union[str, Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Optional[int] = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
lowercase__ : List[Any] = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Union[str, Any] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = -1
lowercase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE )
lowercase__ : Any = greedy_ids[:, input_ids.shape[1] :]
lowercase__ : Any = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowercase__ : str = TextStreamer(SCREAMING_SNAKE_CASE , skip_prompt=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowercase__ : Optional[Any] = cs.out[:-1]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowercase__ : List[str] = AutoTokenizer.from_pretrained("distilgpt2" )
lowercase__ : Tuple = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = -1
lowercase__ : List[Any] = torch.ones((1, 5) , device=SCREAMING_SNAKE_CASE ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowercase__ : Dict = TextStreamer(SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
model.generate(SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=SCREAMING_SNAKE_CASE , streamer=SCREAMING_SNAKE_CASE )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowercase__ : List[Any] = cs.out[:-1] # Remove the final "\n"
lowercase__ : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def snake_case ( self : Optional[int] ):
lowercase__ : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
lowercase__ : List[str] = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : int = -1
lowercase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = TextIteratorStreamer(SCREAMING_SNAKE_CASE , timeout=0.001 )
lowercase__ : Union[str, Any] = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
lowercase__ : Any = Thread(target=model.generate , kwargs=SCREAMING_SNAKE_CASE )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(Empty ):
lowercase__ : List[str] = ""
for new_text in streamer:
streamer_text += new_text
| 81 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
lowercase__ : str = True if "large" in model_name or "huge" in model_name else False
lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : int = [3, 3, 3, 3]
lowercase__ : Tuple = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : Optional[Any] = [4, 4, 4, 4]
lowercase__ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
else:
lowercase__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[Any] = 96
elif "small" in model_name:
lowercase__ : List[str] = 96
elif "base" in model_name:
lowercase__ : str = 128
elif "large" in model_name:
lowercase__ : Any = 192
elif "xlarge" in model_name:
lowercase__ : str = 256
elif "huge" in model_name:
lowercase__ : List[str] = 352
# set label information
lowercase__ : Tuple = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__ : List[Any] = "imagenet-22k-id2label.json"
else:
lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : str = FocalNetConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
return config
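# Map the original FocalNet checkpoint key names onto the Hugging Face module layout.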
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
lowercase__ : List[Any] = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
lowercase__ : Union[str, Any] = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase__ )
lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
lowercase__ : List[str] = val
lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase__ )
# verify conversion
lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int = BitImageProcessor(
do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
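    # replicate the processor's preprocessing with torchvision to cross-check pixel values below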
lowercase__ : Any = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
lowercase__ : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : List[Any]=("DownEncoderBlock2D",) , SCREAMING_SNAKE_CASE : Dict=(64,) , SCREAMING_SNAKE_CASE : Optional[Any]=2 , SCREAMING_SNAKE_CASE : Optional[int]=32 , SCREAMING_SNAKE_CASE : List[str]="silu" , SCREAMING_SNAKE_CASE : str=True , ):
super().__init__()
lowercase__ : str = layers_per_block
lowercase__ : int = torch.nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Union[str, Any] = None
lowercase__ : Optional[int] = nn.ModuleList([] )
# down
lowercase__ : Dict = block_out_channels[0]
for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : List[str] = output_channel
lowercase__ : Dict = block_out_channels[i]
lowercase__ : List[str] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Union[str, Any] = get_down_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
self.down_blocks.append(SCREAMING_SNAKE_CASE )
# mid
lowercase__ : Optional[int] = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# out
lowercase__ : int = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
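        # with double_z the encoder emits 2x channels (mean and logvar of the VAE posterior)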
lowercase__ : Tuple = 2 * out_channels if double_z else out_channels
lowercase__ : Tuple = nn.Convad(block_out_channels[-1] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : Tuple = False
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : List[str] = x
lowercase__ : Tuple = self.conv_in(SCREAMING_SNAKE_CASE )
if self.training and self.gradient_checkpointing:
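            # wrap each block so torch.utils.checkpoint can recompute it during backward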
def create_custom_forward(SCREAMING_SNAKE_CASE : Union[str, Any] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Dict ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
lowercase__ : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
# middle
lowercase__ : int = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
for down_block in self.down_blocks:
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# middle
lowercase__ : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE )
else:
# down
for down_block in self.down_blocks:
lowercase__ : Any = down_block(SCREAMING_SNAKE_CASE )
# middle
lowercase__ : List[str] = self.mid_block(SCREAMING_SNAKE_CASE )
# post-process
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Optional[int]=("UpDecoderBlock2D",) , SCREAMING_SNAKE_CASE : int=(64,) , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : int=32 , SCREAMING_SNAKE_CASE : str="silu" , SCREAMING_SNAKE_CASE : Any="group" , ):
super().__init__()
lowercase__ : List[str] = layers_per_block
lowercase__ : int = nn.Convad(
SCREAMING_SNAKE_CASE , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
lowercase__ : Optional[Any] = None
lowercase__ : Dict = nn.ModuleList([] )
lowercase__ : List[str] = in_channels if norm_type == "spatial" else None
# mid
lowercase__ : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , )
# up
lowercase__ : Tuple = list(reversed(SCREAMING_SNAKE_CASE ) )
lowercase__ : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE ):
lowercase__ : Tuple = output_channel
lowercase__ : List[Any] = reversed_block_out_channels[i]
lowercase__ : List[Any] = i == len(SCREAMING_SNAKE_CASE ) - 1
lowercase__ : Dict = get_up_block(
SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , in_channels=SCREAMING_SNAKE_CASE , out_channels=SCREAMING_SNAKE_CASE , prev_output_channel=SCREAMING_SNAKE_CASE , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=SCREAMING_SNAKE_CASE , resnet_groups=SCREAMING_SNAKE_CASE , attention_head_dim=SCREAMING_SNAKE_CASE , temb_channels=SCREAMING_SNAKE_CASE , resnet_time_scale_shift=SCREAMING_SNAKE_CASE , )
self.up_blocks.append(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = output_channel
# out
if norm_type == "spatial":
lowercase__ : Any = SpatialNorm(block_out_channels[0] , SCREAMING_SNAKE_CASE )
else:
lowercase__ : Tuple = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=SCREAMING_SNAKE_CASE , eps=1E-6 )
lowercase__ : Union[str, Any] = nn.SiLU()
lowercase__ : Any = nn.Convad(block_out_channels[0] , SCREAMING_SNAKE_CASE , 3 , padding=1 )
lowercase__ : List[Any] = False
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : str=None ):
lowercase__ : Tuple = z
lowercase__ : List[str] = self.conv_in(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(SCREAMING_SNAKE_CASE : List[str] ):
def custom_forward(*SCREAMING_SNAKE_CASE : Optional[int] ):
return module(*SCREAMING_SNAKE_CASE )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
lowercase__ : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
lowercase__ : str = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , use_reentrant=SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
# middle
lowercase__ : Optional[int] = self.mid_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = sample.to(SCREAMING_SNAKE_CASE )
# up
for up_block in self.up_blocks:
lowercase__ : Optional[Any] = up_block(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# post-process
if latent_embeds is None:
lowercase__ : Union[str, Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE )
else:
lowercase__ : Dict = self.conv_norm_out(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.conv_act(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.conv_out(SCREAMING_SNAKE_CASE )
return sample
class snake_case__(nn.Module ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[Any]="random" , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : int=True ):
super().__init__()
lowercase__ : List[Any] = n_e
lowercase__ : List[str] = vq_embed_dim
lowercase__ : Optional[Any] = beta
lowercase__ : List[str] = legacy
lowercase__ : Tuple = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
lowercase__ : Union[str, Any] = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
lowercase__ : Tuple = self.used.shape[0]
lowercase__ : Any = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
lowercase__ : Any = self.re_embed
lowercase__ : Tuple = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
lowercase__ : str = n_e
lowercase__ : Union[str, Any] = sane_index_shape
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : List[str] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
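        # compare each index against the table of used codes; rows with no match are "unknown"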
lowercase__ : Optional[int] = (inds[:, :, None] == used[None, None, ...]).long()
lowercase__ : Dict = match.argmax(-1 )
lowercase__ : Dict = match.sum(2 ) < 1
if self.unknown_index == "random":
lowercase__ : Optional[Any] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
lowercase__ : List[Any] = self.unknown_index
return new.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[Any] = inds.shape
assert len(SCREAMING_SNAKE_CASE ) > 1
lowercase__ : Optional[int] = inds.reshape(ishape[0] , -1 )
lowercase__ : str = self.used.to(SCREAMING_SNAKE_CASE )
if self.re_embed > self.used.shape[0]: # extra token
lowercase__ : int = 0 # simply set to zero
lowercase__ : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , SCREAMING_SNAKE_CASE )
return back.reshape(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] ):
# reshape z -> (batch, height, width, channel) and flatten
lowercase__ : Union[str, Any] = z.permute(0 , 2 , 3 , 1 ).contiguous()
lowercase__ : Optional[Any] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
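        # torch.cdist gives the pairwise Euclidean distances; argmin picks the nearest code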
lowercase__ : Optional[Any] = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE , self.embedding.weight ) , dim=1 )
lowercase__ : List[str] = self.embedding(SCREAMING_SNAKE_CASE ).view(z.shape )
lowercase__ : Dict = None
lowercase__ : int = None
# compute loss for embedding
if not self.legacy:
lowercase__ : Optional[Any] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
lowercase__ : List[str] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
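        # (straight-through estimator: forward uses z_q, gradients flow back through z)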
lowercase__ : Union[str, Any] = z + (z_q - z).detach()
# reshape back to match original input shape
lowercase__ : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
lowercase__ : Dict = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
lowercase__ : int = self.remap_to_used(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
lowercase__ : List[str] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
# shape specifying (batch, height, width, channel)
if self.remap is not None:
lowercase__ : Union[str, Any] = indices.reshape(shape[0] , -1 ) # add batch axis
lowercase__ : Union[str, Any] = self.unmap_to_all(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
lowercase__ : List[Any] = self.embedding(SCREAMING_SNAKE_CASE )
if shape is not None:
lowercase__ : Any = z_q.view(SCREAMING_SNAKE_CASE )
# reshape back to match original input shape
lowercase__ : int = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : str=False ):
lowercase__ : Dict = parameters
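        # `parameters` holds [mean, logvar] concatenated along the channel dimension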
lowercase__ , lowercase__ : Optional[int] = torch.chunk(SCREAMING_SNAKE_CASE , 2 , dim=1 )
lowercase__ : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
lowercase__ : Optional[int] = deterministic
lowercase__ : Tuple = torch.exp(0.5 * self.logvar )
lowercase__ : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
lowercase__ : Any = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None ):
        # make sure the sample is on the same device as the parameters and has the same dtype
lowercase__ : Tuple = randn_tensor(
self.mean.shape , generator=SCREAMING_SNAKE_CASE , device=self.parameters.device , dtype=self.parameters.dtype )
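        # reparameterization trick: x = mean + std * eps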
lowercase__ : str = self.mean + self.std * sample
return x
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str]=None ):
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
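                # closed-form KL(N(mean, var) || N(0, I)) for a diagonal Gaussian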
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict=[1, 2, 3] ):
if self.deterministic:
return torch.Tensor([0.0] )
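        # negative log-likelihood of `sample` under the diagonal Gaussian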
lowercase__ : Any = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=SCREAMING_SNAKE_CASE )
def snake_case ( self : Tuple ):
return self.mean
| 81 | 1 |
from __future__ import annotations
lowerCAmelCase__ = '''Muhammad Umer Farooq'''
lowerCAmelCase__ = '''MIT'''
lowerCAmelCase__ = '''1.0.0'''
lowerCAmelCase__ = '''Muhammad Umer Farooq'''
lowerCAmelCase__ = '''contact@muhammadumerfarooq.me'''
lowerCAmelCase__ = '''Alpha'''
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : str ):
super().__init__()
lowercase__ : list[str] = []
lowercase__ : Tuple = domain
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : list[tuple[str, str | None]] ):
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
                # If href is defined and is neither empty nor just "#", record it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
lowercase__ : Optional[Any] = parse.urljoin(self.domain , SCREAMING_SNAKE_CASE )
self.urls.append(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return ".".join(get_sub_domain_name(lowerCamelCase__ ).split("." )[-2:] )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return parse.urlparse(lowerCamelCase__ ).netloc
def __lowerCamelCase ( lowerCamelCase__ = "https://github.com" ):
"""simple docstring"""
lowercase__ : List[str] = get_domain_name(lowerCamelCase__ )
# Initialize the parser
lowercase__ : Tuple = Parser(lowerCamelCase__ )
try:
# Open URL
lowercase__ : Any = requests.get(lowerCamelCase__ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
lowercase__ : Tuple = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
lowercase__ : str = requests.get(lowerCamelCase__ )
# Get the valid email.
lowercase__ : str = re.findall("[a-zA-Z0-9]+@" + domain , read.text )
                # Add each one to the set of valid emails (the set deduplicates).
for email in emails:
valid_emails.add(lowerCamelCase__ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = emails_from_url('''https://github.com''')
print(f'''{len(emails)} emails found:''')
print('''\n'''.join(sorted(emails)))
| 81 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = DiTPipeline
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowercase_ = PipelineTesterMixin.required_optional_params - {
"""latents""",
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
lowercase_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowercase_ = False
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Optional[Any] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=SCREAMING_SNAKE_CASE , activation_fn="gelu-approximate" , num_embeds_ada_norm=1_000 , norm_type="ada_norm_zero" , norm_elementwise_affine=SCREAMING_SNAKE_CASE , )
lowercase__ : Dict = AutoencoderKL()
lowercase__ : Any = DDIMScheduler()
lowercase__ : int = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int=0 ):
if str(SCREAMING_SNAKE_CASE ).startswith("mps" ):
lowercase__ : Union[str, Any] = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
lowercase__ : int = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def snake_case ( self : Any ):
lowercase__ : List[Any] = "cpu"
lowercase__ : str = self.get_dummy_components()
lowercase__ : str = self.pipeline_class(**SCREAMING_SNAKE_CASE )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE )
lowercase__ : str = pipe(**SCREAMING_SNAKE_CASE ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
lowercase__ : Tuple = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowercase__ : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-3 )
def snake_case ( self : str ):
self._test_inference_batch_single_identical(relax_max_difference=SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def snake_case ( self : Tuple ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str ):
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Dict = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
lowercase__ : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
lowercase__ : Optional[Any] = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
f"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
lowercase__ : Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
lowercase__ : Dict = ["vase", "umbrella"]
lowercase__ : Any = pipe.get_label_ids(SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
f"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1
| 81 | 1 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 81 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = (CMStochasticIterativeScheduler,)
lowercase_ = 1_0
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Any = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**SCREAMING_SNAKE_CASE )
return config
def snake_case ( self : Optional[int] ):
lowercase__ : Tuple = 10
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[Any] = self.scheduler_classes[0](**SCREAMING_SNAKE_CASE )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : Any = scheduler.timesteps[0]
lowercase__ : Optional[int] = scheduler.timesteps[1]
lowercase__ : List[Any] = self.dummy_sample
lowercase__ : Tuple = 0.1 * sample
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Any = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case ( self : Dict ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Dict = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Any = 1
scheduler.set_timesteps(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : List[str] = self.dummy_model()
lowercase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(SCREAMING_SNAKE_CASE ):
# 1. scale model input
lowercase__ : Tuple = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Optional[Any] = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Dict = pred_prev_sample
lowercase__ : List[Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Union[str, Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 192.7_614 ) < 1E-2
assert abs(result_mean.item() - 0.2_510 ) < 1E-3
def snake_case ( self : Union[str, Any] ):
lowercase__ : Optional[int] = self.scheduler_classes[0]
lowercase__ : Tuple = self.get_scheduler_config()
lowercase__ : Tuple = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = [106, 0]
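        # pass an explicit (descending) timestep schedule instead of num_inference_steps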
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = scheduler.timesteps
lowercase__ : Optional[int] = torch.manual_seed(0 )
lowercase__ : Optional[int] = self.dummy_model()
lowercase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase__ : Optional[Any] = scheduler.scale_model_input(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 2. predict noise residual
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# 3. predict previous sample x_t-1
lowercase__ : Tuple = scheduler.step(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).prev_sample
lowercase__ : Union[str, Any] = pred_prev_sample
lowercase__ : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE ) )
assert abs(result_sum.item() - 347.6_357 ) < 1E-2
assert abs(result_mean.item() - 0.4_527 ) < 1E-3
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.scheduler_classes[0]
lowercase__ : str = self.get_scheduler_config()
lowercase__ : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : int = [39, 30, 12, 15, 0]
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : Dict = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = [39, 30, 12, 1, 0]
lowercase__ : Tuple = len(SCREAMING_SNAKE_CASE )
with self.assertRaises(SCREAMING_SNAKE_CASE , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.scheduler_classes[0]
lowercase__ : List[Any] = self.get_scheduler_config()
lowercase__ : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            SCREAMING_SNAKE_CASE , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=SCREAMING_SNAKE_CASE )
| 81 | 1 |
from numpy import exp, pi, sqrt
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ = 0.0 , lowerCamelCase__ = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
# setable values
lowercase_ = 42
lowercase_ = 42
lowercase_ = None
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ):
return cls(common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE )
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__(_UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowercase_ = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowercase_ = 42
@property
def snake_case ( self : Dict ):
return True
@register_to_config
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int = 1_000 , SCREAMING_SNAKE_CASE : float = 0.0_001 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : str = "linear" , SCREAMING_SNAKE_CASE : Optional[jnp.ndarray] = None , SCREAMING_SNAKE_CASE : str = "fixed_small" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "epsilon" , SCREAMING_SNAKE_CASE : jnp.dtype = jnp.floataa , ):
lowercase__ : List[Any] = dtype
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : Optional[CommonSchedulerState] = None ):
if common is None:
lowercase__ : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ : Dict = jnp.array(1.0 , dtype=self.dtype )
lowercase__ : Dict = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=SCREAMING_SNAKE_CASE , init_noise_sigma=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[int] = None ):
return sample
def snake_case ( self : int , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple = () ):
lowercase__ : Any = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
lowercase__ : Union[str, Any] = (jnp.arange(0 , SCREAMING_SNAKE_CASE ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=SCREAMING_SNAKE_CASE , timesteps=SCREAMING_SNAKE_CASE , )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : List[Any]=None ):
lowercase__ : Tuple = state.common.alphas_cumprod[t]
lowercase__ : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ : Dict = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ : Union[str, Any] = jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ : Optional[int] = jnp.log(jnp.clip(SCREAMING_SNAKE_CASE , a_min=1E-2_0 ) )
elif variance_type == "fixed_large":
lowercase__ : Union[str, Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ : List[Any] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ : List[Any] = variance
lowercase__ : Union[str, Any] = state.common.betas[t]
lowercase__ : Tuple = (predicted_variance + 1) / 2
lowercase__ : Optional[Any] = frac * max_log + (1 - frac) * min_log
return variance
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Optional[jax.random.KeyArray] = None , SCREAMING_SNAKE_CASE : bool = True , ):
lowercase__ : Tuple = timestep
if key is None:
lowercase__ : Union[str, Any] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ : str = jnp.split(SCREAMING_SNAKE_CASE , sample.shape[1] , axis=1 )
else:
lowercase__ : Any = None
# 1. compute alphas, betas
lowercase__ : Dict = state.common.alphas_cumprod[t]
lowercase__ : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ : Optional[Any] = 1 - alpha_prod_t
lowercase__ : Optional[int] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ : Optional[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
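            # v-prediction parameterization ("Progressive Distillation", Salimans & Ho, 2022)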
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ : List[Any] = jnp.clip(SCREAMING_SNAKE_CASE , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ : str = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ : Any = jax.random.split(SCREAMING_SNAKE_CASE , num=1 )
lowercase__ : Any = jax.random.normal(SCREAMING_SNAKE_CASE , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , predicted_variance=SCREAMING_SNAKE_CASE ) ** 0.5) * noise
lowercase__ : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
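        # noise is only added for t > 0; the final step is deterministic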
lowercase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE , state=SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return add_noise_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : DDPMSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , ):
return get_velocity_common(state.common , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __len__( self : Tuple ):
return self.config.num_train_timesteps
| 81 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class snake_case__:
"""simple docstring"""
def snake_case ( self : str , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
return None
class snake_case__:
"""simple docstring"""
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str] ):
return None
class snake_case__(unittest.TestCase ):
"""simple docstring"""
lowercase_ = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case ( self : Optional[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE , "tf" , 12 , **SCREAMING_SNAKE_CASE )
@require_torch
@slow
def snake_case ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(SCREAMING_SNAKE_CASE , "pt" , 12 , **SCREAMING_SNAKE_CASE )
@require_torch
@slow
def snake_case ( self : List[str] ):
from transformers import BertModel
lowercase__ : int = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(SCREAMING_SNAKE_CASE ) )
vocab_file.flush()
lowercase__ : List[str] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
lowercase__ : Optional[Any] = BertModel(BertConfig(vocab_size=len(SCREAMING_SNAKE_CASE ) ) )
model.save_pretrained(SCREAMING_SNAKE_CASE )
self._test_export(SCREAMING_SNAKE_CASE , "pt" , 12 , SCREAMING_SNAKE_CASE )
@require_tf
@slow
def snake_case ( self : Union[str, Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase__ : List[str] = self._test_export(SCREAMING_SNAKE_CASE , "tf" , 12 , **SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = quantize(Path(SCREAMING_SNAKE_CASE ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def snake_case ( self : Union[str, Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
lowercase__ : Tuple = self._test_export(SCREAMING_SNAKE_CASE , "pt" , 12 , **SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = quantize(SCREAMING_SNAKE_CASE )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(SCREAMING_SNAKE_CASE ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any]=None , **SCREAMING_SNAKE_CASE : Any ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
lowercase__ : List[str] = Path(SCREAMING_SNAKE_CASE ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
return path
except Exception as e:
self.fail(SCREAMING_SNAKE_CASE )
@require_torch
@require_tokenizers
@slow
def snake_case ( self : List[str] ):
from transformers import BertModel
lowercase__ : Tuple = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowercase__ : Optional[Any] = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "pt" )
@require_tf
@require_tokenizers
@slow
def snake_case ( self : Optional[Any] ):
from transformers import TFBertModel
lowercase__ : Tuple = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
lowercase__ : Optional[Any] = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "tf" )
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Optional[Any] = FeatureExtractionPipeline(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = infer_shapes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Assert all variables are present
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , SCREAMING_SNAKE_CASE )
self.assertSequenceEqual(variable_names[3:] , SCREAMING_SNAKE_CASE )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def snake_case ( self : int ):
lowercase__ : Optional[int] = ["input_ids", "attention_mask", "token_type_ids"]
lowercase__ : List[str] = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
lowercase__ , lowercase__ : str = ensure_valid_input(FuncContiguousArgs() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(SCREAMING_SNAKE_CASE ) , set(SCREAMING_SNAKE_CASE ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(SCREAMING_SNAKE_CASE , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
lowercase__ , lowercase__ : Optional[Any] = ensure_valid_input(FuncNonContiguousArgs() , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def snake_case ( self : Any ):
lowercase__ : Dict = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
| 81 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowerCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : CLIPSegForImageSegmentation , SCREAMING_SNAKE_CASE : CLIPSegProcessor , SCREAMING_SNAKE_CASE : AutoencoderKL , SCREAMING_SNAKE_CASE : CLIPTextModel , SCREAMING_SNAKE_CASE : CLIPTokenizer , SCREAMING_SNAKE_CASE : UNetaDConditionModel , SCREAMING_SNAKE_CASE : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , SCREAMING_SNAKE_CASE : StableDiffusionSafetyChecker , SCREAMING_SNAKE_CASE : CLIPImageProcessor , ):
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : int = dict(scheduler.config )
lowercase__ : Any = 1
lowercase__ : Union[str, Any] = FrozenDict(SCREAMING_SNAKE_CASE )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
lowercase__ : Optional[Any] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , SCREAMING_SNAKE_CASE , standard_warn=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = dict(scheduler.config )
lowercase__ : Union[str, Any] = True
lowercase__ : int = FrozenDict(SCREAMING_SNAKE_CASE )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=SCREAMING_SNAKE_CASE , segmentation_processor=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=SCREAMING_SNAKE_CASE , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase__ : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.enable_attention_slicing(SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ : Union[str, Any] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case ( self : Optional[Any] ):
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(SCREAMING_SNAKE_CASE , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, List[str]] , SCREAMING_SNAKE_CASE : Union[torch.FloatTensor, PIL.Image.Image] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 512 , SCREAMING_SNAKE_CASE : int = 50 , SCREAMING_SNAKE_CASE : float = 7.5 , SCREAMING_SNAKE_CASE : Optional[Union[str, List[str]]] = None , SCREAMING_SNAKE_CASE : Optional[int] = 1 , SCREAMING_SNAKE_CASE : float = 0.0 , SCREAMING_SNAKE_CASE : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE : Optional[str] = "pil" , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE : int = 1 , **SCREAMING_SNAKE_CASE : Optional[Any] , ):
lowercase__ : Dict = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
lowercase__ : int = self.segmentation_model(**SCREAMING_SNAKE_CASE )
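        # turn the segmentation logits into a [0, 1] grayscale mask resized to the input image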
lowercase__ : int = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
lowercase__ : List[str] = self.numpy_to_pil(SCREAMING_SNAKE_CASE )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
lowercase__ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , height=SCREAMING_SNAKE_CASE , width=SCREAMING_SNAKE_CASE , num_inference_steps=SCREAMING_SNAKE_CASE , guidance_scale=SCREAMING_SNAKE_CASE , negative_prompt=SCREAMING_SNAKE_CASE , num_images_per_prompt=SCREAMING_SNAKE_CASE , eta=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , latents=SCREAMING_SNAKE_CASE , output_type=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , callback=SCREAMING_SNAKE_CASE , callback_steps=SCREAMING_SNAKE_CASE , )
| 81 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any]=13 , SCREAMING_SNAKE_CASE : Any=3 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : str=224 , SCREAMING_SNAKE_CASE : Tuple=1_000 , SCREAMING_SNAKE_CASE : Union[str, Any]=[3, 3, 6, 4] , SCREAMING_SNAKE_CASE : List[str]=[48, 56, 112, 220] , ):
lowercase__ : List[str] = parent
lowercase__ : List[Any] = batch_size
lowercase__ : int = num_channels
lowercase__ : Optional[Any] = is_training
lowercase__ : Optional[Any] = use_labels
lowercase__ : str = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = num_labels
lowercase__ : int = image_size
lowercase__ : Tuple = layer_depths
lowercase__ : Optional[int] = embed_dims
def snake_case ( self : List[str] ):
lowercase__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : List[str] = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Optional[Any] ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=SCREAMING_SNAKE_CASE , layer_scale_init_value=1E-5 , )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Optional[int] = SwiftFormerModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Optional[int] = self.num_labels
lowercase__ : int = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
lowercase__ : List[str] = SwiftFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : Any ):
((lowercase__) , (lowercase__) , (lowercase__)) : str = self.prepare_config_and_inputs()
lowercase__ : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowercase_ = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : str ):
lowercase__ : Optional[Any] = SwiftFormerModelTester(self )
lowercase__ : List[str] = ConfigTester(
self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def snake_case ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def snake_case ( self : str ):
pass
def snake_case ( self : Union[str, Any] ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def snake_case ( self : Dict ):
lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Dict = [*signature.parameters.keys()]
lowercase__ : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Any ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = SwiftFormerModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Tuple ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ):
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.hidden_states
lowercase__ : Dict = 8
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(SCREAMING_SNAKE_CASE ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
def _config_zero_init(SCREAMING_SNAKE_CASE : Any ):
lowercase__ : Dict = copy.deepcopy(SCREAMING_SNAKE_CASE )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , 1E-1_0 )
if isinstance(getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ):
lowercase__ : Union[str, Any] = _config_zero_init(getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return configs_no_init
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(config=SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self : Optional[Any] ):
pass
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Optional[Any] ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def snake_case ( self : Dict ):
lowercase__ : List[str] = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = self.default_image_processor
lowercase__ : str = prepare_img()
lowercase__ : int = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Dict = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : str = torch.tensor([[-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0]] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
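# A minimal inference sketch mirroring the integration test above (the model id
# comes from the test; the rest is standard transformers API usage):
#
# from transformers import SwiftFormerForImageClassification, ViTImageProcessor
# processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
# model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
# logits = model(**processor(images=img, return_tensors="pt")).logits  # shape (1, 1000)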
| 81 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Dict = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
lowercase__ : str = True if "large" in model_name or "huge" in model_name else False
lowercase__ : Optional[Any] = True if "large" in model_name or "huge" in model_name else False
lowercase__ : List[str] = True if "large" in model_name or "huge" in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ : int = [3, 3, 3, 3]
lowercase__ : Tuple = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ : Optional[Any] = [4, 4, 4, 4]
lowercase__ : Optional[Any] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ : Union[str, Any] = [3, 3, 3, 3]
else:
lowercase__ : Tuple = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ : Optional[Any] = 96
elif "small" in model_name:
lowercase__ : List[str] = 96
elif "base" in model_name:
lowercase__ : str = 128
elif "large" in model_name:
lowercase__ : Any = 192
elif "xlarge" in model_name:
lowercase__ : str = 256
elif "huge" in model_name:
lowercase__ : List[str] = 352
# set label information
lowercase__ : Tuple = "huggingface/label-files"
if "large" in model_name or "huge" in model_name:
lowercase__ : List[Any] = "imagenet-22k-id2label.json"
else:
lowercase__ : Optional[int] = "imagenet-1k-id2label.json"
lowercase__ : Optional[int] = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="dataset" ) , "r" ) )
lowercase__ : Optional[int] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
lowercase__ : int = {v: k for k, v in idalabel.items()}
lowercase__ : str = FocalNetConfig(
embed_dim=lowerCamelCase__ , depths=lowerCamelCase__ , focal_levels=lowerCamelCase__ , focal_windows=lowerCamelCase__ , use_conv_embed=lowerCamelCase__ , idalabel=lowerCamelCase__ , labelaid=lowerCamelCase__ , use_post_layernorm=lowerCamelCase__ , use_layerscale=lowerCamelCase__ , )
return config
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ : int = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
lowercase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
lowercase__ : List[str] = "encoder." + name
if "encoder.layers" in name:
lowercase__ : Optional[Any] = name.replace("encoder.layers" , "encoder.stages" )
if "downsample.proj" in name:
lowercase__ : Optional[Any] = name.replace("downsample.proj" , "downsample.projection" )
if "blocks" in name:
lowercase__ : List[str] = name.replace("blocks" , "layers" )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ : Any = name.replace("modulation.f" , "modulation.projection_in" )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.h" , "modulation.projection_context" )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ : Optional[Any] = name.replace("modulation.proj" , "modulation.projection_out" )
if name == "norm.weight":
lowercase__ : List[str] = "layernorm.weight"
if name == "norm.bias":
lowercase__ : List[Any] = "layernorm.bias"
if "head" in name:
lowercase__ : Optional[int] = name.replace("head" , "classifier" )
else:
lowercase__ : Union[str, Any] = "focalnet." + name
return name
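# Example mapping produced by the function above:
# "layers.0.blocks.1.modulation.f.weight"
# -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"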
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ):
"""simple docstring"""
# fmt: off
lowercase__ : List[Any] = {
"focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
"focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
"focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
"focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
"focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
"focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
"focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
"focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
"focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
"focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
}
# fmt: on
lowercase__ : Union[str, Any] = model_name_to_url[model_name]
print("Checkpoint URL: " , lowerCamelCase__ )
lowercase__ : Optional[int] = torch.hub.load_state_dict_from_url(lowerCamelCase__ , map_location="cpu" )["model"]
# rename keys
for key in state_dict.copy().keys():
lowercase__ : Tuple = state_dict.pop(lowerCamelCase__ )
lowercase__ : List[str] = val
lowercase__ : List[str] = get_focalnet_config(lowerCamelCase__ )
lowercase__ : Union[str, Any] = FocalNetForImageClassification(lowerCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(lowerCamelCase__ )
# verify conversion
lowercase__ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase__ : int = BitImageProcessor(
do_resize=lowerCamelCase__ , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=lowerCamelCase__ , crop_size=224 , do_normalize=lowerCamelCase__ , image_mean=lowerCamelCase__ , image_std=lowerCamelCase__ , )
lowercase__ : Tuple = Image.open(requests.get(lowerCamelCase__ , stream=lowerCamelCase__ ).raw )
lowercase__ : Tuple = processor(images=lowerCamelCase__ , return_tensors="pt" )
lowercase__ : Any = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ : int = image_transforms(lowerCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , lowerCamelCase__ , atol=1e-4 )
lowercase__ : List[Any] = model(**lowerCamelCase__ )
lowercase__ : int = outputs.logits.argmax(-1 ).item()
print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
print("First values of logits:" , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ : Optional[int] = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
lowercase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
lowercase__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
lowercase__ : str = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
lowercase__ : Optional[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 1 |
class snake_case__:
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : list ):
lowercase__ : Dict = set_counts
lowercase__ : Union[str, Any] = max(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = len(SCREAMING_SNAKE_CASE )
lowercase__ : str = [1] * num_sets
lowercase__ : Union[str, Any] = list(range(SCREAMING_SNAKE_CASE ) )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Union[str, Any] = self.get_parent(SCREAMING_SNAKE_CASE )
lowercase__ : Any = self.get_parent(SCREAMING_SNAKE_CASE )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase__ : Tuple = 0
lowercase__ : List[str] = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase__ : int = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase__ : Union[str, Any] = 0
lowercase__ : Optional[Any] = src_parent
lowercase__ : Tuple = self.set_counts[src_parent]
lowercase__ : str = max(self.max_set , SCREAMING_SNAKE_CASE )
return True
def snake_case ( self : str , SCREAMING_SNAKE_CASE : int ):
if self.parents[disj_set] == disj_set:
return disj_set
lowercase__ : Optional[Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
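# Usage sketch (method names are obfuscated above; conceptually this is a
# union-find with union-by-rank that also tracks the size of the largest set):
#
# sets = UnionFind([1, 1, 1])   # hypothetical readable name; three singletons
# sets.merge(0, 1)              # -> True, and sets.max_set == 2
# sets.merge(0, 1)              # same root already -> False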
| 81 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """informer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ):
# time series specific configuration
lowercase__ : Any = prediction_length
lowercase__ : List[str] = context_length or prediction_length
lowercase__ : Tuple = distribution_output
lowercase__ : Union[str, Any] = loss
lowercase__ : Union[str, Any] = input_size
lowercase__ : List[str] = num_time_features
lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase__ : List[str] = scaling
lowercase__ : str = num_dynamic_real_features
lowercase__ : Tuple = num_static_real_features
lowercase__ : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Dict = cardinality
else:
lowercase__ : Dict = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Union[str, Any] = embedding_dimension
else:
lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ : Dict = num_parallel_samples
# Transformer architecture configuration
lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ : Optional[Any] = d_model
lowercase__ : int = encoder_attention_heads
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : Tuple = decoder_layers
lowercase__ : Union[str, Any] = dropout
lowercase__ : List[Any] = attention_dropout
lowercase__ : str = activation_dropout
lowercase__ : int = encoder_layerdrop
lowercase__ : Union[str, Any] = decoder_layerdrop
lowercase__ : Tuple = activation_function
lowercase__ : str = init_std
lowercase__ : Tuple = use_cache
# Informer
lowercase__ : Union[str, Any] = attention_type
lowercase__ : Union[str, Any] = sampling_factor
lowercase__ : Tuple = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
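# A minimal configuration sketch (values are illustrative):
#
# config = InformerConfig(
#     prediction_length=24,    # forecast horizon
#     context_length=48,       # conditioning window (defaults to prediction_length)
#     num_time_features=2,
#     attention_type="prob",   # Informer's ProbSparse attention
#     sampling_factor=5,
# )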
| 81 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : str=3 , SCREAMING_SNAKE_CASE : str=32 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : Union[str, Any]=10 , SCREAMING_SNAKE_CASE : Tuple=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : Optional[Any]=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : str="relu" , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=None , ):
lowercase__ : Any = parent
lowercase__ : Union[str, Any] = batch_size
lowercase__ : Optional[int] = image_size
lowercase__ : str = num_channels
lowercase__ : List[str] = embeddings_size
lowercase__ : List[str] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[Any] = is_training
lowercase__ : List[Any] = use_labels
lowercase__ : Optional[int] = hidden_act
lowercase__ : Dict = num_labels
lowercase__ : List[Any] = scope
lowercase__ : str = len(SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[int] = None
if self.use_labels:
lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Optional[Any] ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : str = TFResNetModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : Any = self.num_labels
lowercase__ : Dict = TFResNetForImageClassification(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : Any ):
lowercase__ : List[str] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : str = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
lowercase_ = (
{"""feature-extraction""": TFResNetModel, """image-classification""": TFResNetForImageClassification}
if is_tf_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFResNetModelTester(self )
lowercase__ : int = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : Optional[int] ):
return
@unittest.skip(reason="ResNet does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ResNet does not support input and output embeddings" )
def snake_case ( self : Dict ):
pass
def snake_case ( self : List[Any] ):
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : int ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ):
lowercase__ : str = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ : int = layer_type
lowercase__ : Tuple = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[int] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] ):
lowercase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[Any] ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Dict = TFResNetModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Tuple ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def snake_case ( self : List[str] ):
lowercase__ : Dict = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowercase__ : Any = self.default_image_processor
lowercase__ : Tuple = prepare_img()
lowercase__ : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# forward pass
lowercase__ : Optional[int] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : str = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = tf.constant([-11.1_069, -9.7_877, -8.3_777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCAmelCase__ = logging.get_logger(__name__)
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
if "xprophetnet" in prophetnet_checkpoint_path:
lowercase__ : int = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Any = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
else:
lowercase__ : List[str] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCamelCase__ )
lowercase__ , lowercase__ : Optional[int] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCamelCase__ , output_loading_info=lowerCamelCase__ )
lowercase__ : int = ["key_proj", "value_proj", "query_proj"]
lowercase__ : str = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
lowercase__ : Union[str, Any] = key.split("." )
if attributes[0] == "lm_head":
lowercase__ : Tuple = prophet
lowercase__ : Tuple = prophet_old
else:
lowercase__ : Tuple = prophet.prophetnet
lowercase__ : List[str] = prophet_old.model
lowercase__ : int = False
for attribute in attributes:
if attribute in mapping:
lowercase__ : int = mapping[attribute]
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) > 0:
lowercase__ : Dict = attribute
elif hasattr(lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ : Optional[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowercase__ : Any = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowercase__ : str = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowercase__ : Tuple = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowercase__ : str = True
break
elif attribute in special_keys and hasattr(lowerCamelCase__ , "in_proj_weight" ):
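# the old checkpoint fuses q/k/v into a single (3 * embed_dim, embed_dim)
# `in_proj_weight`; the new model keeps separate projections, so the matching
# third of the fused matrix is sliced out below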
lowercase__ : str = old_model.in_proj_weight.shape[0] // 3
lowercase__ : Any = getattr(lowerCamelCase__ , lowerCamelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowercase__ : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowercase__ : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowercase__ : Any = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowercase__ : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowercase__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowercase__ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowercase__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowercase__ : Union[str, Any] = True
break
if attribute.isdigit():
lowercase__ : str = model[int(lowerCamelCase__ )]
lowercase__ : Union[str, Any] = old_model[int(lowerCamelCase__ )]
else:
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if old_attribute == "":
lowercase__ : str = old_model
else:
if not hasattr(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowercase__ : int = getattr(lowerCamelCase__ , lowerCamelCase__ )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
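# Example invocation (the script filename is illustrative; both arguments are
# required by the parser above):
# python convert_prophetnet_checkpoint.py \
#     --prophetnet_checkpoint_path ./prophetnet_old \
#     --pytorch_dump_folder_path ./prophetnet_converted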
| 81 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Tuple ):
warnings.warn(
"The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DeiTImageProcessor instead." , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 81 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = GPTaTokenizer
lowercase_ = GPTaTokenizerFast
lowercase_ = True
lowercase_ = {"""add_prefix_space""": True}
lowercase_ = False
def snake_case ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase__ : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
lowercase__ : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowercase__ : List[str] = {"unk_token": "<unk>"}
lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(SCREAMING_SNAKE_CASE ) )
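# the toy vocab/merges above define a minimal byte-level BPE: "\u0120" is
# GPT-2's marker for a leading space, so e.g. "\u0120low" encodes " low"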
def snake_case ( self : Tuple , **SCREAMING_SNAKE_CASE : int ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = "lower newer"
lowercase__ : Optional[Any] = "lower newer"
return input_text, output_text
def snake_case ( self : Any ):
lowercase__ : Dict = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase__ : Dict = "lower newer"
lowercase__ : Optional[Any] = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
lowercase__ : Optional[Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = tokens + [tokenizer.unk_token]
lowercase__ : str = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
lowercase__ : Dict = self.get_tokenizer()
lowercase__ : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = "lower newer"
# Testing tokenization
lowercase__ : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : int = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids without special tokens
lowercase__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing conversion to ids with special tokens
lowercase__ : List[str] = self.get_rust_tokenizer(add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Testing the unknown token
lowercase__ : List[Any] = tokens + [rust_tokenizer.unk_token]
lowercase__ : Optional[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
def snake_case ( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# Simple input
lowercase__ : Dict = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Union[str, Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[int] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Simple input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(SCREAMING_SNAKE_CASE , tokenizer_r.encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" )
# Pair input
self.assertRaises(
SCREAMING_SNAKE_CASE , tokenizer_r.batch_encode_plus , SCREAMING_SNAKE_CASE , max_length=SCREAMING_SNAKE_CASE , padding="max_length" , )
def snake_case ( self : Any ):
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[int]=7 , SCREAMING_SNAKE_CASE : List[Any]=3 , SCREAMING_SNAKE_CASE : Dict=18 , SCREAMING_SNAKE_CASE : Dict=30 , SCREAMING_SNAKE_CASE : Optional[int]=400 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : List[str]=True , ):
lowercase__ : str = size if size is not None else {"shortest_edge": 20}
lowercase__ : str = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowercase__ : Any = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = num_channels
lowercase__ : str = image_size
lowercase__ : Optional[int] = min_resolution
lowercase__ : Tuple = max_resolution
lowercase__ : Dict = do_resize
lowercase__ : Optional[Any] = size
lowercase__ : Optional[int] = do_center_crop
lowercase__ : Tuple = crop_size
lowercase__ : Optional[int] = do_flip_channel_order
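# `do_flip_channel_order` flips inputs from RGB to BGR, matching the channel
# order the original MobileViT preprocessing used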
def snake_case ( self : List[Any] ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class snake_case__(_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = MobileViTImageProcessor if is_vision_available() else None
def snake_case ( self : str ):
lowercase__ : int = MobileViTImageProcessingTester(self )
@property
def snake_case ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self : Optional[int] ):
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "center_crop" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_flip_channel_order" ) )
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowercase__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def snake_case ( self : Tuple ):
pass
def snake_case ( self : Optional[Any] ):
# Initialize image_processing
lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
lowercase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : Tuple = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case ( self : List[Any] ):
# Initialize image_processing
lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
lowercase__ : Any = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : Dict = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def snake_case ( self : List[str] ):
# Initialize image_processing
lowercase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
lowercase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ : Tuple = image_processing(SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
| 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
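# Rough sketch of what the lazy-module pattern above buys (assuming the
# original module lives under `transformers`): importing the package stays
# cheap because `_LazyModule` only resolves a submodule on first attribute
# access, e.g.
# from transformers import TimesformerConfig  # real import happens lazily here
# and the try/except around `is_torch_available()` keeps the torch-backed
# model classes out of the import structure when torch is not installed.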
| 81 | 1 |
from __future__ import annotations
lowerCAmelCase__ = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : dict[str, list[str]] , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Tuple = graph
# mapping node to its parent in resulting breadth first tree
lowercase__ : dict[str, str | None] = {}
lowercase__ : Any = source_vertex
def snake_case ( self : List[Any] ):
lowercase__ : Tuple = {self.source_vertex}
lowercase__ : str = None
lowercase__ : str = [self.source_vertex] # first in first out queue
while queue:
lowercase__ : Optional[int] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = vertex
queue.append(SCREAMING_SNAKE_CASE )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : str ):
if target_vertex == self.source_vertex:
return self.source_vertex
lowercase__ : Optional[int] = self.parent.get(SCREAMING_SNAKE_CASE )
if target_vertex_parent is None:
lowercase__ : Optional[Any] = (
f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(SCREAMING_SNAKE_CASE )
return self.shortest_path(SCREAMING_SNAKE_CASE ) + f"""->{target_vertex}"""
if __name__ == "__main__":
lowerCAmelCase__ = Graph(graph, '''G''')
g.breath_first_search()
print(g.shortest_path('''D'''))
print(g.shortest_path('''G'''))
print(g.shortest_path('''Foo'''))
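# With source vertex "G", the BFS tree gives parents C<-G, A<-C, F<-C, B<-A,
# E<-A, D<-B, so in the original, fully-wired version (where the parent map is
# actually populated) the three prints would yield roughly:
# G->C->A->B->D
# G
# ValueError: No path from vertex: G to vertex: Foo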
| 81 |
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int=13 , SCREAMING_SNAKE_CASE : Union[str, Any]=30 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=3 , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Optional[int]=2 , SCREAMING_SNAKE_CASE : str=4 , SCREAMING_SNAKE_CASE : List[Any]=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : List[Any]=0.1 , SCREAMING_SNAKE_CASE : int=10 , SCREAMING_SNAKE_CASE : List[str]=0.02 , SCREAMING_SNAKE_CASE : Tuple=3 , SCREAMING_SNAKE_CASE : str=0.6 , SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : List[Any] = patch_size
lowercase__ : Any = num_channels
lowercase__ : Optional[int] = is_training
lowercase__ : Dict = use_labels
lowercase__ : Any = hidden_size
lowercase__ : List[Any] = num_hidden_layers
lowercase__ : Union[str, Any] = num_attention_heads
lowercase__ : Dict = intermediate_size
lowercase__ : Optional[int] = hidden_act
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : Union[str, Any] = attention_probs_dropout_prob
lowercase__ : List[Any] = type_sequence_label_size
lowercase__ : Any = initializer_range
lowercase__ : Optional[int] = mask_ratio
lowercase__ : Union[str, Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase__ : List[Any] = (image_size // patch_size) ** 2
lowercase__ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
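# Worked example with the defaults above: image_size=30 and patch_size=2 give
# (30 // 2) ** 2 = 225 patches; with mask_ratio=0.6 the expected sequence
# length is ceil(0.4 * (225 + 1)) = ceil(90.4) = 91 visible tokens.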
def snake_case ( self : int ):
lowercase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : str = None
if self.use_labels:
lowercase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple ):
lowercase__ : Tuple = TFViTMAEModel(config=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : Union[str, Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
# expected sequence length = num_patches
lowercase__ : List[str] = (self.image_size // self.patch_size) ** 2
lowercase__ : List[Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase__ : Dict = 1
lowercase__ : List[Any] = TFViTMAEForPreTraining(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , training=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case ( self : Optional[int] ):
lowercase__ : int = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) : Dict = config_and_inputs
lowercase__ : str = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase_ = {"""feature-extraction""": TFViTMAEModel} if is_tf_available() else {}
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[str] ):
lowercase__ : List[Any] = TFViTMAEModelTester(self )
lowercase__ : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def snake_case ( self : Optional[Any] ):
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Union[str, Any] = [*signature.parameters.keys()]
lowercase__ : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Any = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = outputs_dict[0].numpy()
lowercase__ : Optional[int] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1E-6 )
def snake_case ( self : str ):
# make the mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Tuple = {}
for k, v in inputs_dict.items():
if tf.is_tensor(SCREAMING_SNAKE_CASE ):
lowercase__ : Any = v.numpy()
else:
lowercase__ : List[Any] = np.array(SCREAMING_SNAKE_CASE )
return inputs_np_dict
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Any = prepare_numpy_arrays(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
lowercase__ : Optional[int] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
lowercase__ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : Union[str, Any] = tf.constant(SCREAMING_SNAKE_CASE )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase__ : Optional[int] = tf_noise
super().check_pt_tf_models(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : int = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(SCREAMING_SNAKE_CASE )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),)
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(SCREAMING_SNAKE_CASE , "_keras_serializable" , SCREAMING_SNAKE_CASE )
}
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase__ : str = tf.convert_to_tensor(SCREAMING_SNAKE_CASE )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
lowercase__ : Tuple = main_layer_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
lowercase__ : Tuple = tf.keras.Model(SCREAMING_SNAKE_CASE , outputs=main_layer(SCREAMING_SNAKE_CASE ) )
lowercase__ : str = model(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ : str = os.path.join(SCREAMING_SNAKE_CASE , "keras_model.h5" )
model.save(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = tf.keras.models.load_model(
SCREAMING_SNAKE_CASE , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(SCREAMING_SNAKE_CASE , tf.keras.Model )
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : Optional[int] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : str = outputs.last_hidden_state.numpy()
lowercase__ : Optional[Any] = 0
else:
lowercase__ : Optional[Any] = outputs.logits.numpy()
lowercase__ : Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE , saved_model=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = model_class.from_pretrained(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
if model_class.__name__ == "TFViTMAEModel":
lowercase__ : Optional[int] = after_outputs["last_hidden_state"].numpy()
lowercase__ : Optional[int] = 0
else:
lowercase__ : str = after_outputs["logits"].numpy()
lowercase__ : Tuple = 0
lowercase__ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(SCREAMING_SNAKE_CASE , 1E-5 )
def snake_case ( self : List[Any] ):
# make mask reproducible
np.random.seed(2 )
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[str] = int((config.image_size // config.patch_size) ** 2 )
lowercase__ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
lowercase__ : Tuple = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
lowercase__ : str = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(SCREAMING_SNAKE_CASE )
lowercase__ : int = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
lowercase__ : Any = model_class.from_config(model.config )
lowercase__ : Tuple = new_model(SCREAMING_SNAKE_CASE ) # Build model
new_model.set_weights(model.get_weights() )
lowercase__ : Union[str, Any] = new_model(SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
self.assert_outputs_same(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def snake_case ( self : str ):
pass
@slow
def snake_case ( self : List[Any] ):
lowercase__ : List[Any] = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : Any ):
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def snake_case ( self : Union[str, Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase__ : Optional[Any] = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
lowercase__ : Optional[Any] = self.default_image_processor
lowercase__ : Union[str, Any] = prepare_img()
lowercase__ : Tuple = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase__ : Union[str, Any] = ViTMAEConfig()
lowercase__ : str = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase__ : List[str] = np.random.uniform(size=(1, num_patches) )
# forward pass
lowercase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE , noise=SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : List[str] = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
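# Shape check, spelled out: ViT-MAE-base uses 224x224 inputs with 16x16
# patches, so (224 // 16) ** 2 = 196 patches, and the decoder predicts each
# patch's pixels, 16 * 16 * 3 = 768 values per patch — hence [1, 196, 768].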
lowercase__ : List[str] = tf.convert_to_tensor(
[[-0.0_548, -1.7_023, -0.9_325], [0.3_721, -0.5_670, -0.2_233], [0.8_235, -1.3_878, -0.3_524]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
| 81 | 1 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = AutoencoderKL
lowercase_ = """sample"""
lowercase_ = 1e-2
@property
def snake_case ( self : Dict ):
lowercase__ : Tuple = 4
lowercase__ : Dict = 3
lowercase__ : Any = (32, 32)
lowercase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(SCREAMING_SNAKE_CASE )
return {"sample": image}
@property
def snake_case ( self : Optional[Any] ):
return (3, 32, 32)
@property
def snake_case ( self : Optional[Any] ):
return (3, 32, 32)
def snake_case ( self : Optional[int] ):
lowercase__ : Optional[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
lowercase__ : Dict = self.dummy_input
return init_dict, inputs_dict
def snake_case ( self : Any ):
pass
def snake_case ( self : Tuple ):
pass
@unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" )
def snake_case ( self : str ):
# enable deterministic behavior for gradient checkpointing
lowercase__ , lowercase__ : List[Any] = self.prepare_init_args_and_inputs_for_common()
lowercase__ : List[Any] = self.model_class(**SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
assert not model.is_gradient_checkpointing and model.training
lowercase__ : Any = model(**SCREAMING_SNAKE_CASE ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowercase__ : Optional[Any] = torch.randn_like(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowercase__ : List[Any] = self.model_class(**SCREAMING_SNAKE_CASE )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(SCREAMING_SNAKE_CASE )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowercase__ : Optional[Any] = model_a(**SCREAMING_SNAKE_CASE ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowercase__ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
lowercase__ : Optional[Any] = dict(model.named_parameters() )
lowercase__ : Optional[int] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
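# The two runs should agree because gradient checkpointing only trades compute
# for memory: activations are recomputed during backward instead of stored, so
# the losses and parameter gradients match up to small numerical noise (the
# atol values above) while peak memory drops.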
def snake_case ( self : Any ):
lowercase__ , lowercase__ : List[Any] = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def snake_case ( self : List[str] ):
lowercase__ : Dict = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" )
lowercase__ : List[Any] = model.to(SCREAMING_SNAKE_CASE )
model.eval()
if torch_device == "mps":
lowercase__ : Optional[Any] = torch.manual_seed(0 )
else:
lowercase__ : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 )
lowercase__ : Dict = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase__ : Optional[int] = image.to(SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowercase__ : Dict = model(SCREAMING_SNAKE_CASE , sample_posterior=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE ).sample
lowercase__ : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase__ : Optional[Any] = torch.tensor(
[
-4.0_0_7_8E-0_1,
-3.8_3_2_3E-0_4,
-1.2_6_8_1E-0_1,
-1.1_4_6_2E-0_1,
2.0_0_9_5E-0_1,
1.0_8_9_3E-0_1,
-8.8_2_4_7E-0_2,
-3.0_3_6_1E-0_1,
-9.8_6_4_4E-0_3,
] )
elif torch_device == "cpu":
lowercase__ : Any = torch.tensor(
[-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] )
else:
lowercase__ : List[Any] = torch.tensor(
[-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] )
self.assertTrue(torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , rtol=1E-2 ) )
@slow
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ):
return f"""gaussian_noise_s={seed}_shape={"_".join([str(SCREAMING_SNAKE_CASE ) for s in shape] )}.npy"""
def snake_case ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : str , SCREAMING_SNAKE_CASE : int=0 , SCREAMING_SNAKE_CASE : str=(4, 3, 512, 512) , SCREAMING_SNAKE_CASE : Tuple=False ):
lowercase__ : int = torch.floataa if fpaa else torch.floataa
lowercase__ : Dict = torch.from_numpy(load_hf_numpy(self.get_file_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) ).to(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
return image
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int]="CompVis/stable-diffusion-v1-4" , SCREAMING_SNAKE_CASE : Union[str, Any]=False ):
lowercase__ : Optional[Any] = "fp16" if fpaa else None
lowercase__ : List[Any] = torch.floataa if fpaa else torch.floataa
lowercase__ : List[str] = AutoencoderKL.from_pretrained(
SCREAMING_SNAKE_CASE , subfolder="vae" , torch_dtype=SCREAMING_SNAKE_CASE , revision=SCREAMING_SNAKE_CASE , )
model.to(SCREAMING_SNAKE_CASE ).eval()
return model
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=0 ):
if torch_device == "mps":
return torch.manual_seed(SCREAMING_SNAKE_CASE )
return torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : List[str] = self.get_sd_vae_model()
lowercase__ : Union[str, Any] = self.get_sd_image(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_generator(SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , sample_posterior=SCREAMING_SNAKE_CASE ).sample
assert sample.shape == image.shape
lowercase__ : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase__ : Tuple = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]],
[47, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]],
# fmt: on
] )
@require_torch_gpu
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : List[str] = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = self.get_sd_image(SCREAMING_SNAKE_CASE , fpaa=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = self.get_generator(SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowercase__ : Any = model(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , sample_posterior=SCREAMING_SNAKE_CASE ).sample
assert sample.shape == image.shape
lowercase__ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase__ : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE )
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]],
[47, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]],
# fmt: on
] )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ):
lowercase__ : int = self.get_sd_vae_model()
lowercase__ : Any = self.get_sd_image(SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE ).sample
assert sample.shape == image.shape
lowercase__ : List[str] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase__ : str = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice )
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]],
[37, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]],
# fmt: on
] )
@require_torch_gpu
def snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] ):
lowercase__ : Any = self.get_sd_vae_model()
lowercase__ : Dict = self.get_sd_image(SCREAMING_SNAKE_CASE , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase__ : List[Any] = model.decode(SCREAMING_SNAKE_CASE ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase__ : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase__ : int = torch.tensor(SCREAMING_SNAKE_CASE )
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]],
[16, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]],
# fmt: on
] )
@require_torch_gpu
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Tuple = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = self.get_sd_image(SCREAMING_SNAKE_CASE , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowercase__ : Dict = model.decode(SCREAMING_SNAKE_CASE ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase__ : List[str] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase__ : Any = torch.tensor(SCREAMING_SNAKE_CASE )
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] ):
lowercase__ : Any = self.get_sd_vae_model(fpaa=SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = self.get_sd_image(SCREAMING_SNAKE_CASE , shape=(3, 4, 64, 64) , fpaa=SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowercase__ : Dict = model.decode(SCREAMING_SNAKE_CASE ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase__ : Any = model.decode(SCREAMING_SNAKE_CASE ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int ):
lowercase__ : Union[str, Any] = self.get_sd_vae_model()
lowercase__ : List[str] = self.get_sd_image(SCREAMING_SNAKE_CASE , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase__ : str = model.decode(SCREAMING_SNAKE_CASE ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase__ : Optional[Any] = model.decode(SCREAMING_SNAKE_CASE ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]],
[47, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]],
# fmt: on
] )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] ):
lowercase__ : int = self.get_sd_vae_model()
lowercase__ : str = self.get_sd_image(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self.get_generator(SCREAMING_SNAKE_CASE )
with torch.no_grad():
lowercase__ : Dict = model.encode(SCREAMING_SNAKE_CASE ).latent_dist
lowercase__ : str = dist.sample(generator=SCREAMING_SNAKE_CASE )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
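# The `// 8` reflects the Stable Diffusion VAE's three stride-2 downsampling
# stages (2 ** 3 = 8): a 512x512 RGB image encodes to a 4-channel 64x64
# latent, which is what the shape assertion above checks.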
lowercase__ : Optional[int] = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase__ : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = 3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE )
| 81 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# TODO Update this
lowerCAmelCase__ = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """esm"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : str=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : Tuple=768 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Any=12 , SCREAMING_SNAKE_CASE : Optional[int]=3_072 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=1_026 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : str=1E-1_2 , SCREAMING_SNAKE_CASE : List[str]="absolute" , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , mask_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowercase__ : List[str] = vocab_size
lowercase__ : int = hidden_size
lowercase__ : Union[str, Any] = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : List[str] = intermediate_size
lowercase__ : Union[str, Any] = hidden_dropout_prob
lowercase__ : List[str] = attention_probs_dropout_prob
lowercase__ : List[str] = max_position_embeddings
lowercase__ : List[str] = initializer_range
lowercase__ : Optional[Any] = layer_norm_eps
lowercase__ : Optional[int] = position_embedding_type
lowercase__ : Optional[int] = use_cache
lowercase__ : Optional[int] = emb_layer_norm_before
lowercase__ : List[str] = token_dropout
lowercase__ : Optional[int] = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("No esmfold_config supplied for folding model, using default values." )
lowercase__ : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE )
lowercase__ : Dict = esmfold_config
if vocab_list is None:
logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
lowercase__ : List[str] = get_default_vocab_list()
else:
lowercase__ : List[Any] = vocab_list
else:
lowercase__ : List[Any] = None
lowercase__ : List[str] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , SCREAMING_SNAKE_CASE ):
raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
def snake_case ( self : List[str] ):
lowercase__ : Optional[Any] = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE ):
lowercase__ : Dict = self.esmfold_config.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = None
lowercase_ = True
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = 0
lowercase_ = True
lowercase_ = False
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Optional[int] ):
if self.trunk is None:
lowercase__ : Dict = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE ):
lowercase__ : int = TrunkConfig(**self.trunk )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Any = self.trunk.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 4_8
lowercase_ = 1_0_2_4
lowercase_ = 1_2_8
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 3_2
lowercase_ = 0
lowercase_ = 0
lowercase_ = False
lowercase_ = 4
lowercase_ = 1_2_8
lowercase_ = None
def snake_case ( self : Dict ):
if self.structure_module is None:
lowercase__ : str = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""" )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
f""" {self.sequence_state_dim} and {self.sequence_head_width}.""" )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
f""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""" )
lowercase__ : Union[str, Any] = self.sequence_state_dim // self.sequence_head_width
lowercase__ : List[Any] = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""" )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""" )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""" )
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""" )
def snake_case ( self : Optional[Any] ):
lowercase__ : int = asdict(self )
lowercase__ : Optional[int] = self.structure_module.to_dict()
return output
@dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 3_8_4
lowercase_ = 1_2_8
lowercase_ = 1_6
lowercase_ = 1_2_8
lowercase_ = 1_2
lowercase_ = 4
lowercase_ = 8
lowercase_ = 0.1
lowercase_ = 8
lowercase_ = 1
lowercase_ = 2
lowercase_ = 7
lowercase_ = 1_0
lowercase_ = 1e-8
lowercase_ = 1e5
def snake_case ( self : Dict ):
return asdict(self )
def __lowerCamelCase ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 81 | 1 |
import qiskit
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : Optional[Any] = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
lowercase__ : Any = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
lowercase__ : List[str] = qiskit.execute(lowerCamelCase__ , lowerCamelCase__ , shots=1_000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase__ )
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
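# Since the circuit applies no gates before measuring, the qubit stays in |0>,
# so all shots should read '0' — the printed counts should be {'0': 1000}.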
| 81 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """deformable_detr"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Dict=3 , SCREAMING_SNAKE_CASE : int=300 , SCREAMING_SNAKE_CASE : Any=1_024 , SCREAMING_SNAKE_CASE : Dict=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[int]=8 , SCREAMING_SNAKE_CASE : str=6 , SCREAMING_SNAKE_CASE : Optional[int]=1_024 , SCREAMING_SNAKE_CASE : Optional[Any]=8 , SCREAMING_SNAKE_CASE : List[Any]=0.0 , SCREAMING_SNAKE_CASE : Tuple=True , SCREAMING_SNAKE_CASE : List[str]="relu" , SCREAMING_SNAKE_CASE : List[Any]=256 , SCREAMING_SNAKE_CASE : int=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=0.0 , SCREAMING_SNAKE_CASE : List[str]=0.0 , SCREAMING_SNAKE_CASE : Tuple=0.02 , SCREAMING_SNAKE_CASE : Any=1.0 , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : str=False , SCREAMING_SNAKE_CASE : Optional[int]="sine" , SCREAMING_SNAKE_CASE : List[str]="resnet50" , SCREAMING_SNAKE_CASE : List[str]=True , SCREAMING_SNAKE_CASE : Any=False , SCREAMING_SNAKE_CASE : Optional[Any]=4 , SCREAMING_SNAKE_CASE : List[str]=4 , SCREAMING_SNAKE_CASE : Tuple=4 , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Tuple=300 , SCREAMING_SNAKE_CASE : Optional[Any]=False , SCREAMING_SNAKE_CASE : Tuple=1 , SCREAMING_SNAKE_CASE : Any=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[Any]=1 , SCREAMING_SNAKE_CASE : str=1 , SCREAMING_SNAKE_CASE : List[str]=5 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Union[str, Any]=0.25 , SCREAMING_SNAKE_CASE : str=False , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase__ : Optional[int] = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
lowercase__ : List[Any] = backbone_config.get("model_type" )
lowercase__ : Any = CONFIG_MAPPING[backbone_model_type]
lowercase__ : str = config_class.from_dict(SCREAMING_SNAKE_CASE )
lowercase__ : int = use_timm_backbone
lowercase__ : Optional[Any] = backbone_config
lowercase__ : Union[str, Any] = num_channels
lowercase__ : List[Any] = num_queries
lowercase__ : List[Any] = max_position_embeddings
lowercase__ : Union[str, Any] = d_model
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : Optional[Any] = encoder_attention_heads
lowercase__ : Optional[Any] = decoder_ffn_dim
lowercase__ : List[Any] = decoder_layers
lowercase__ : Optional[int] = decoder_attention_heads
lowercase__ : str = dropout
lowercase__ : Union[str, Any] = attention_dropout
lowercase__ : List[str] = activation_dropout
lowercase__ : Optional[Any] = activation_function
lowercase__ : Optional[Any] = init_std
lowercase__ : str = init_xavier_std
lowercase__ : Any = encoder_layerdrop
lowercase__ : int = auxiliary_loss
lowercase__ : Dict = position_embedding_type
lowercase__ : int = backbone
lowercase__ : Optional[Any] = use_pretrained_backbone
lowercase__ : List[Any] = dilation
# deformable attributes
lowercase__ : Dict = num_feature_levels
lowercase__ : Optional[int] = encoder_n_points
lowercase__ : Any = decoder_n_points
lowercase__ : int = two_stage
lowercase__ : int = two_stage_num_proposals
lowercase__ : Union[str, Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase__ : List[Any] = class_cost
lowercase__ : Optional[int] = bbox_cost
lowercase__ : Any = giou_cost
# Loss coefficients
lowercase__ : List[str] = mask_loss_coefficient
lowercase__ : int = dice_loss_coefficient
lowercase__ : Any = bbox_loss_coefficient
lowercase__ : Any = giou_loss_coefficient
lowercase__ : Optional[int] = eos_coefficient
lowercase__ : int = focal_alpha
lowercase__ : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : List[Any] ):
return self.encoder_attention_heads
@property
def snake_case ( self : Union[str, Any] ):
return self.d_model
def snake_case ( self : str ):
lowercase__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase__ : int = self.backbone_config.to_dict()
lowercase__ : Union[str, Any] = self.__class__.model_type
return output
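# Illustrative sketch (assuming the original class name `DeformableDetrConfig`):
# DeformableDetrConfig(two_stage=True, with_box_refine=False)  # raises ValueError per the guard above
# DeformableDetrConfig(two_stage=True, with_box_refine=True)   # valid: two-stage requires box refinement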
| 81 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : str=7 , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : int=True , SCREAMING_SNAKE_CASE : Optional[int]=True , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Dict=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Tuple=2 , SCREAMING_SNAKE_CASE : Optional[Any]=99 , SCREAMING_SNAKE_CASE : List[Any]=0 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : Tuple=5 , SCREAMING_SNAKE_CASE : Union[str, Any]=4 , SCREAMING_SNAKE_CASE : Optional[int]=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : Optional[int]=512 , SCREAMING_SNAKE_CASE : int=12 , SCREAMING_SNAKE_CASE : Any=2 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=3 , SCREAMING_SNAKE_CASE : List[Any]=4 , SCREAMING_SNAKE_CASE : List[str]="last" , SCREAMING_SNAKE_CASE : Tuple=None , SCREAMING_SNAKE_CASE : Optional[int]=None , ):
lowercase__ : List[Any] = parent
lowercase__ : Tuple = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : int = is_training
lowercase__ : Dict = use_input_lengths
lowercase__ : Any = use_token_type_ids
lowercase__ : str = use_labels
lowercase__ : Tuple = gelu_activation
lowercase__ : List[Any] = sinusoidal_embeddings
lowercase__ : Tuple = causal
lowercase__ : Dict = asm
lowercase__ : int = n_langs
lowercase__ : Tuple = vocab_size
lowercase__ : Dict = n_special
lowercase__ : int = hidden_size
lowercase__ : Optional[Any] = num_hidden_layers
lowercase__ : Dict = num_attention_heads
lowercase__ : Tuple = hidden_dropout_prob
lowercase__ : Optional[Any] = attention_probs_dropout_prob
lowercase__ : Any = max_position_embeddings
lowercase__ : int = type_vocab_size
lowercase__ : Any = type_sequence_label_size
lowercase__ : Optional[int] = initializer_range
lowercase__ : Tuple = num_labels
lowercase__ : Dict = num_choices
lowercase__ : Tuple = summary_type
lowercase__ : List[Any] = use_proj
lowercase__ : List[str] = scope
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : int = None
if self.use_input_lengths:
lowercase__ : str = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
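# ids_tensor([...], vocab_size=2) draws values in {0, 1}, so each length here
# is either seq_length - 2 or seq_length - 1 — always valid, just slightly
# shorter than the padded sequence.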
lowercase__ : str = None
if self.use_token_type_ids:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase__ : Any = None
lowercase__ : List[str] = None
lowercase__ : Any = None
if self.use_labels:
lowercase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowercase__ : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def snake_case ( self : Optional[int] ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , ):
lowercase__ : str = FlaubertModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , lengths=SCREAMING_SNAKE_CASE , langs=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , langs=SCREAMING_SNAKE_CASE )
lowercase__ : int = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , ):
lowercase__ : int = FlaubertWithLMHeadModel(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , ):
lowercase__ : Union[str, Any] = FlaubertForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : List[str] = FlaubertForQuestionAnswering(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE )
lowercase__ : int = model(
SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , cls_index=SCREAMING_SNAKE_CASE , is_impossible=SCREAMING_SNAKE_CASE , p_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : int = model(
SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE , cls_index=SCREAMING_SNAKE_CASE , is_impossible=SCREAMING_SNAKE_CASE , )
((lowercase__) , ) : str = result_with_labels.to_tuple()
lowercase__ : Optional[int] = model(SCREAMING_SNAKE_CASE , start_positions=SCREAMING_SNAKE_CASE , end_positions=SCREAMING_SNAKE_CASE )
((lowercase__) , ) : List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
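# Illustrative note (example values, not from the source): with start_n_top = end_n_top = 5,
# the beam-search QA head scores 5 * 5 = 25 (start, end) span candidates per example, which
# is why the two end_top tensors above have start_n_top * end_n_top columns.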
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Any , ):
lowercase__ : Optional[Any] = FlaubertForSequenceClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : int = model(SCREAMING_SNAKE_CASE )
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , ):
lowercase__ : Tuple = self.num_labels
lowercase__ : Optional[Any] = FlaubertForTokenClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : List[Any] , ):
lowercase__ : Any = self.num_choices
lowercase__ : List[Any] = FlaubertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase__ : Dict = model(
SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case ( self : str ):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ = (
{
"""feature-extraction""": FlaubertModel,
"""fill-mask""": FlaubertWithLMHeadModel,
"""question-answering""": FlaubertForQuestionAnsweringSimple,
"""text-classification""": FlaubertForSequenceClassification,
"""token-classification""": FlaubertForTokenClassification,
"""zero-shot""": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def snake_case ( self : List[Any] , pipeline_test_casse_name : Tuple , config_class : str , model_architecture : List[str] , tokenizer_name : List[str] , processor_name : int ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def snake_case ( self : List[str] , inputs_dict : Optional[Any] , model_class : Optional[Any] , return_labels : bool = False ):
inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
inputs_dict["start_positions"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
inputs_dict["end_positions"] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def snake_case ( self : List[Any] ):
lowercase__ : Any = FlaubertModelTester(self )
lowercase__ : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , emb_dim=37 )
def snake_case ( self : int ):
self.config_tester.run_common_tests()
def snake_case ( self : Any ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Union[str, Any] ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*SCREAMING_SNAKE_CASE )
def snake_case ( self : Optional[Any] ):
lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : str ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : str = FlaubertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def snake_case ( self : Tuple ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
lowercase__ : Optional[Any] = True
lowercase__ : str = model_class(config=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = torch.jit.trace(
SCREAMING_SNAKE_CASE , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , "traced_model.pt" ) )
lowercase__ : List[Any] = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE , "traced_model.pt" ) , map_location=SCREAMING_SNAKE_CASE )
loaded(inputs_dict["input_ids"].to(SCREAMING_SNAKE_CASE ) , inputs_dict["attention_mask"].to(SCREAMING_SNAKE_CASE ) )
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self : str ):
lowercase__ : Tuple = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
lowercase__ : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
with torch.no_grad():
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )[0]
lowercase__ : Union[str, Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Dict = torch.tensor(
[[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = ["""pixel_values"""]
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 8 , **SCREAMING_SNAKE_CASE : Dict , ):
super().__init__(**SCREAMING_SNAKE_CASE )
lowercase__ : str = do_rescale
lowercase__ : Optional[Any] = rescale_factor
lowercase__ : Any = do_pad
lowercase__ : Optional[Any] = pad_size
def snake_case ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] ):
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None ):
lowercase__ , lowercase__ : str = get_image_size(SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = (old_height // size + 1) * size - old_height
lowercase__ : List[Any] = (old_width // size + 1) * size - old_width
return pad(SCREAMING_SNAKE_CASE , ((0, pad_height), (0, pad_width)) , mode="symmetric" , data_format=SCREAMING_SNAKE_CASE )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[float] = None , SCREAMING_SNAKE_CASE : Optional[bool] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Union[str, ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : Dict , ):
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ : str = do_pad if do_pad is not None else self.do_pad
lowercase__ : Optional[int] = pad_size if pad_size is not None else self.pad_size
lowercase__ : Tuple = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
lowercase__ : Any = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
lowercase__ : Any = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_pad:
lowercase__ : Tuple = [self.pad(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Union[str, Any] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
lowercase__ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
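# A minimal sketch of the padding arithmetic used by the pad method above; the helper name
# and the sample sizes are illustrative, not part of the class. Note the formula always pads
# up to the next multiple of `size`, adding a full extra `size` when already divisible.
def _pad_amounts(old_height, old_width, size=8):
    pad_height = (old_height // size + 1) * size - old_height
    pad_width = (old_width // size + 1) * size - old_width
    return pad_height, pad_width

assert _pad_amounts(10, 13, size=8) == (6, 3)
assert _pad_amounts(16, 16, size=8) == (8, 8)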
| 81 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase__ = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
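# Note: with this _LazyModule pattern the import itself stays cheap; the torch/tf/flax
# submodules listed in _import_structure are only loaded on first attribute access.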
| 81 |
import argparse
import json
from tqdm import tqdm
def main():
"""simple docstring"""
lowercase__ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--src_path" , type=lowerCamelCase__ , default="biencoder-nq-dev.json" , help="Path to raw DPR training data" , )
parser.add_argument(
"--evaluation_set" , type=lowerCamelCase__ , help="where to store parsed evaluation_set file" , )
parser.add_argument(
"--gold_data_path" , type=lowerCamelCase__ , help="where to store parsed gold_data_path file" , )
lowercase__ : Dict = parser.parse_args()
with open(args.src_path , "r" ) as src_file, open(args.evaluation_set , "w" ) as eval_file, open(
args.gold_data_path , "w" ) as gold_file:
dpr_records = json.load(src_file )
for dpr_record in tqdm(dpr_records ):
question = dpr_record["question"]
titles = [context["title"] for context in dpr_record["positive_ctxs"]]
eval_file.write(question + "\n" )
gold_file.write("\t".join(titles ) + "\n" )
if __name__ == "__main__":
main()
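# Example invocation (the script and file names are illustrative):
# python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#     --evaluation_set eval_questions.txt --gold_data_path gold_titles.tsv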
| 81 | 1 |
# Algorithm for the pigeonhole sorting
def pigeonhole_sort( a ):
"""simple docstring"""
min_val = min(a ) # min() finds the minimum value
max_val = max(a ) # max() finds the maximum value
size = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
holes = [0] * size
# Populate the pigeonholes.
for x in a:
assert isinstance(x , int ), "integers only please"
holes[x - min_val] += 1
# Putting the elements back into the array in an order.
i = 0
for count in range(size ):
while holes[count] > 0:
holes[count] -= 1
a[i] = count + min_val
i += 1
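# Pigeonhole sort runs in O(n + range) time and O(range) extra space, where
# range = max(a) - min(a) + 1, so it only pays off when the value range is small
# relative to the number of elements.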
def main():
"""simple docstring"""
a = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(a )
print("Sorted order is:" , " ".join(str(x ) for x in a ) )
if __name__ == "__main__":
main()
| 81 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase__ = logging.getLogger(__name__)
def parse_args():
"""simple docstring"""
lowercase__ : str = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowerCamelCase__ , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowerCamelCase__ , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowerCamelCase__ , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowerCamelCase__ , default=1_000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowerCamelCase__ , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowerCamelCase__ , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowerCamelCase__ , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
lowercase__ : Optional[int] = parser.parse_args()
return args
def tokenize_function( tokenizer ):
"""simple docstring"""
def fn(examples ):
return tokenizer(examples["text"] )
return fn
def get_serialized_examples( tokenized_data ):
"""simple docstring"""
lowercase__ : str = []
for i in range(len(tokenized_data["input_ids"] ) ):
lowercase__ : str = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
lowercase__ : Any = tf.train.Features(feature=lowerCamelCase__ )
lowercase__ : Any = tf.train.Example(features=lowerCamelCase__ )
lowercase__ : str = example.SerializeToString()
records.append(lowerCamelCase__ )
return records
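# Each serialized record can later be recovered with tf.io.parse_single_example and a
# matching feature spec, e.g. {"input_ids": tf.io.FixedLenFeature([max_length], tf.int64)};
# the spec shown here is an assumed illustration, not part of this script.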
def main( args ):
"""simple docstring"""
lowercase__ : Tuple = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ : List[str] = min(len(lowerCamelCase__ ) , args.limit )
lowercase__ : Union[str, Any] = dataset.select(range(lowerCamelCase__ ) )
print(F"""Limiting the dataset to {args.limit} entries.""" )
lowercase__ : Any = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ : Any = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowerCamelCase__ ):
os.makedirs(lowerCamelCase__ )
else:
lowercase__ : str = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ : str = tokenize_function(lowerCamelCase__ )
lowercase__ : Optional[int] = dataset.map(lowerCamelCase__ , batched=lowerCamelCase__ , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowerCamelCase__ ):
# Concatenate all texts.
lowercase__ : Optional[Any] = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ : int = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ : List[str] = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ : Optional[int] = {
k: [t[i : i + args.max_length] for i in range(0 , lowerCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
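# Worked example of the concatenate-then-chunk step (numbers are made up): with
# max_length=4, tokenized rows [[1, 2, 3], [4, 5, 6, 7]] are concatenated to
# [1, 2, 3, 4, 5, 6, 7], total_length is floored from 7 to 4, and the single
# surviving chunk is [1, 2, 3, 4]; the remainder [5, 6, 7] is dropped.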
lowercase__ : Union[str, Any] = dataset_tokenized.map(lowerCamelCase__ , batched=lowerCamelCase__ , batch_size=1_000 , num_proc=4 )
lowercase__ : str = 0
lowercase__ : str = 0
for shard in range(0 , len(lowerCamelCase__ ) , args.shard_size ):
lowercase__ : List[str] = grouped_dataset[shard : shard + args.shard_size]
lowercase__ : str = len(dataset_snapshot["input_ids"] )
lowercase__ : int = os.path.join(lowerCamelCase__ , F"""dataset-{shard_count}-{records_containing}.tfrecord""" )
lowercase__ : Optional[int] = get_serialized_examples(lowerCamelCase__ )
with tf.io.TFRecordWriter(lowerCamelCase__ ) as out_file:
for i in range(len(lowerCamelCase__ ) ):
lowercase__ : Optional[int] = serialized_examples[i]
out_file.write(lowerCamelCase__ )
print("Wrote file {} containing {} records".format(lowerCamelCase__ , lowerCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(F"""split-{args.split}-records-count.txt""" , "w" ) as f:
print(F"""Total {args.split} records: {total_records}""" , file=lowerCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase__ = parse_args()
main(args)
| 81 | 1 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case__:
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple=13 , SCREAMING_SNAKE_CASE : List[str]=32 , SCREAMING_SNAKE_CASE : int=3 , SCREAMING_SNAKE_CASE : Any=4 , SCREAMING_SNAKE_CASE : Optional[Any]=[10, 20, 30, 40] , SCREAMING_SNAKE_CASE : int=[2, 2, 3, 2] , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : str=37 , SCREAMING_SNAKE_CASE : Tuple="gelu" , SCREAMING_SNAKE_CASE : Optional[int]=10 , SCREAMING_SNAKE_CASE : Optional[int]=0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=["stage2", "stage3", "stage4"] , SCREAMING_SNAKE_CASE : Optional[int]=[2, 3, 4] , SCREAMING_SNAKE_CASE : str=None , ):
lowercase__ : Union[str, Any] = parent
lowercase__ : Optional[int] = batch_size
lowercase__ : Optional[Any] = image_size
lowercase__ : Tuple = num_channels
lowercase__ : Tuple = num_stages
lowercase__ : List[Any] = hidden_sizes
lowercase__ : Any = depths
lowercase__ : List[str] = is_training
lowercase__ : int = use_labels
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : List[Any] = hidden_act
lowercase__ : Tuple = num_labels
lowercase__ : Optional[Any] = initializer_range
lowercase__ : Optional[Any] = out_features
lowercase__ : Union[str, Any] = out_indices
lowercase__ : Tuple = scope
def snake_case ( self : Dict ):
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Dict = None
if self.use_labels:
lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ : Tuple = self.get_config()
return config, pixel_values, labels
def snake_case ( self : Tuple ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] ):
lowercase__ : Dict = ConvNextVaModel(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] ):
lowercase__ : Any = ConvNextVaForImageClassification(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : str = model(SCREAMING_SNAKE_CASE , labels=SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Dict ):
lowercase__ : Any = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : Tuple = model(SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase__ : str = None
lowercase__ : List[Any] = ConvNextVaBackbone(config=SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowercase__ : List[Any] = model(SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def snake_case ( self : Dict ):
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
lowercase__ : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
def snake_case ( self : Optional[Any] ):
lowercase__ : Optional[Any] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
lowercase__ : Optional[Any] = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class snake_case__(_UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
lowercase_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def snake_case ( self : List[Any] ):
lowercase__ : List[str] = ConvNextVaModelTester(self )
lowercase__ : Optional[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE , hidden_size=37 )
def snake_case ( self : Optional[int] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self : List[str] ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def snake_case ( self : Dict ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def snake_case ( self : Union[str, Any] ):
pass
def snake_case ( self : Optional[int] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : List[str] = True
if model_class.__name__ in [
*get_values(MODEL_MAPPING_NAMES ),
*get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
lowercase__ : List[str] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.train()
lowercase__ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_with_labels()
lowercase__ : Optional[Any] = False
lowercase__ : Dict = True
if (
model_class.__name__
in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
lowercase__ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowercase__ : str = model(**SCREAMING_SNAKE_CASE ).loss
loss.backward()
def snake_case ( self : int ):
lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE )
lowercase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : str = [*signature.parameters.keys()]
lowercase__ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def snake_case ( self : Dict ):
lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : str ):
lowercase__ : Any = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowercase__ : Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowercase__ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ : Dict = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Optional[Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def snake_case ( self : Any ):
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
@slow
def snake_case ( self : List[str] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : List[str] = ConvNextVaModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( ):
"""simple docstring"""
lowercase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def snake_case ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def snake_case ( self : Optional[int] ):
lowercase__ : Union[str, Any] = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(SCREAMING_SNAKE_CASE )
lowercase__ : Dict = self.default_image_processor
lowercase__ : int = prepare_img()
lowercase__ : Optional[Any] = preprocessor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowercase__ : Tuple = model(**SCREAMING_SNAKE_CASE )
# verify the logits
lowercase__ : Optional[int] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = torch.tensor([0.9_996, 0.1_966, -0.4_386] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 81 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : str ):
warnings.warn(
"The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use FlavaImageProcessor instead." , SCREAMING_SNAKE_CASE , )
super().__init__(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 81 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@slow
@require_torch
def snake_case ( self : Any ):
lowercase__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
lowercase__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
lowercase__ : str = bertabert.config.encoder.vocab_size
lowercase__ : List[str] = tokenizer.sep_token_id
lowercase__ : Optional[Any] = tokenizer.cls_token_id
lowercase__ : int = 128
lowercase__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
lowercase__ : Tuple = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
lowercase__ : Tuple = train_dataset.select(range(32 ) )
lowercase__ : Optional[int] = val_dataset.select(range(16 ) )
lowercase__ : int = 4
def _map_to_encoder_decoder_inputs(SCREAMING_SNAKE_CASE : Optional[Any] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
lowercase__ : List[Any] = tokenizer(batch["article"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=512 )
lowercase__ : Dict = tokenizer(batch["highlights"] , padding="max_length" , truncation=SCREAMING_SNAKE_CASE , max_length=128 )
lowercase__ : Tuple = inputs.input_ids
lowercase__ : Optional[int] = inputs.attention_mask
lowercase__ : int = outputs.input_ids
lowercase__ : Dict = outputs.input_ids.copy()
lowercase__ : int = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
lowercase__ : List[Any] = outputs.attention_mask
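# e.g. with pad_token_id = 0, a label row [101, 2023, 0, 0] becomes [101, 2023, -100, -100];
# -100 is the default ignore_index of PyTorch's cross-entropy loss, so padded positions do
# not contribute to the seq2seq training loss.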
assert all(len(SCREAMING_SNAKE_CASE ) == 512 for x in inputs.input_ids )
assert all(len(SCREAMING_SNAKE_CASE ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(SCREAMING_SNAKE_CASE : List[str] ):
labels_ids = pred.label_ids
pred_ids = pred.predictions
# all unnecessary tokens are removed
pred_str = tokenizer.batch_decode(pred_ids , skip_special_tokens=True )
label_str = tokenizer.batch_decode(labels_ids , skip_special_tokens=True )
accuracy = sum([int(pred_str[i] == label_str[i] ) for i in range(len(pred_str ) )] ) / len(pred_str )
return {"accuracy": accuracy}
# map train dataset
lowercase__ : List[str] = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
lowercase__ : Any = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=SCREAMING_SNAKE_CASE , batch_size=SCREAMING_SNAKE_CASE , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
lowercase__ : List[str] = self.get_auto_remove_tmp_dir()
lowercase__ : int = SeqaSeqTrainingArguments(
output_dir=SCREAMING_SNAKE_CASE , per_device_train_batch_size=SCREAMING_SNAKE_CASE , per_device_eval_batch_size=SCREAMING_SNAKE_CASE , predict_with_generate=SCREAMING_SNAKE_CASE , evaluation_strategy="steps" , do_train=SCREAMING_SNAKE_CASE , do_eval=SCREAMING_SNAKE_CASE , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
lowercase__ : str = SeqaSeqTrainer(
model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , compute_metrics=_compute_metrics , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , )
# start training
trainer.train()
| 81 | 1 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/informer-tourism-monthly''': (
'''https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'''
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """informer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : int , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = None , SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = "mean" , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.05 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : str = "prob" , SCREAMING_SNAKE_CASE : int = 5 , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : List[Any] , ):
# time series specific configuration
lowercase__ : Any = prediction_length
lowercase__ : List[str] = context_length or prediction_length
lowercase__ : Tuple = distribution_output
lowercase__ : Union[str, Any] = loss
lowercase__ : Union[str, Any] = input_size
lowercase__ : List[str] = num_time_features
lowercase__ : Optional[Any] = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
lowercase__ : List[str] = scaling
lowercase__ : str = num_dynamic_real_features
lowercase__ : Tuple = num_static_real_features
lowercase__ : List[str] = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Dict = cardinality
else:
lowercase__ : Dict = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Union[str, Any] = embedding_dimension
else:
lowercase__ : Optional[int] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
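# e.g. a categorical feature with cardinality 7 gets an embedding of size min(50, (7 + 1) // 2) = 4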
lowercase__ : Dict = num_parallel_samples
# Transformer architecture configuration
lowercase__ : Tuple = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ : Optional[Any] = d_model
lowercase__ : int = encoder_attention_heads
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : List[Any] = encoder_ffn_dim
lowercase__ : List[str] = decoder_ffn_dim
lowercase__ : List[str] = encoder_layers
lowercase__ : Tuple = decoder_layers
lowercase__ : Union[str, Any] = dropout
lowercase__ : List[Any] = attention_dropout
lowercase__ : str = activation_dropout
lowercase__ : int = encoder_layerdrop
lowercase__ : Union[str, Any] = decoder_layerdrop
lowercase__ : Tuple = activation_function
lowercase__ : str = init_std
lowercase__ : Tuple = use_cache
# Informer
lowercase__ : Union[str, Any] = attention_type
lowercase__ : Union[str, Any] = sampling_factor
lowercase__ : Tuple = distil
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
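    # Minimal usage sketch (illustrative values; assumes the standard `transformers` config API):
    #   config = InformerConfig(prediction_length=24, context_length=48, num_time_features=2)
    #   config.feature_size  # encoder/decoder input dimension derived in __init__ above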
| 81 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name):
    """Builds a YolosConfig matching the requested YOLOS variant."""
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict, config, base_model=False):
    """Splits each fused qkv matrix of the original checkpoint into separate q/k/v tensors."""
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
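# Note: timm checkpoints store Q, K and V as a single fused projection, while the HF YOLOS
# attention module expects three separate linear layers, hence the slicing above.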
def rename_key(name):
    """Maps a parameter name from the original YOLOS checkpoint to the HF naming scheme."""
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")
    return name
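# The replacements run in order, so the more specific "mid_pos_embed" pattern is handled
# before the generic "pos_embed" match, and "attn.proj" before the bare "attn" match.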
def convert_state_dict(orig_state_dict, model):
    """Renames every key of the original state dict and splits fused qkv projections."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def prepare_img():
    """Downloads the standard COCO verification image used across conversion scripts."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Converts an original YOLOS checkpoint into the HF format and verifies the outputs."""
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
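# Example invocation (hypothetical local paths, assuming this file is saved as
# convert_yolos_to_pytorch.py):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small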
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 81 | 1 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of k consecutive elements of `array` (sliding window, O(n))."""
    if len(array) < k or k < 0:
        raise ValueError("k must be non-negative and no larger than the length of the array")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        # slide the window one step: drop array[i], add array[i + k]
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
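# Example: max_sum_in_array([1, 4, 2, 10, 23, 3, 1, 0, 20], 4) == 39  (window [4, 2, 10, 23])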
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 81 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
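# Lazy import pattern: submodules listed in `_import_structure` are only imported when first
# accessed, and torch-backed modeling symbols are registered only if PyTorch is available.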
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mgp_str'''] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 81 | 1 |