"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
lowercase = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
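
# Added example (not part of the original snippet): the float cube root above can
# drift for large n, so the exact comparison may misfire. A more defensive sketch
# verifies with integer arithmetic instead.
def perfect_cube_checked(n: int) -> bool:
    # Round the approximate cube root, then confirm exactly with integers;
    # probing the neighbours guards against off-by-one rounding.
    if n < 0:
        return False
    root = round(n ** (1 / 3))
    return any((root + d) ** 3 == n for d in (-1, 0, 1))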
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = {}
if top_k is not None:
lowercase = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = load_image(__lowerCAmelCase )
lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.model(**__lowerCAmelCase )
return model_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase = probs.topk(__lowerCAmelCase )
elif self.framework == "tf":
lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase )
lowercase , lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list ) -> list:
'''simple docstring'''
lowercase = len(lowerCAmelCase__ )
for i in range(1 , lowerCAmelCase__ ):
lowercase = collection[i]
lowercase = 0
lowercase = i - 1
while low <= high:
lowercase = (low + high) // 2
if val < collection[mid]:
lowercase = mid - 1
else:
lowercase = mid + 1
for j in range(lowerCAmelCase__ , lowerCAmelCase__ , -1 ):
lowercase = collection[j - 1]
lowercase = val
return collection
if __name__ == "__main__":
__lowerCAmelCase : List[str] =input("""Enter numbers separated by a comma:\n""").strip()
__lowerCAmelCase : Optional[Any] =[int(item) for item in user_input.split(""",""")]
print(binary_insertion_sort(unsorted))
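
# Quick self-check (added; not in the original script). Binary search cuts the
# number of comparisons, though shifting still makes the sort O(n^2) overall.
if __name__ == "__main__":
    assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
    assert binary_insertion_sort([]) == []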
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase : str ={
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Tuple =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
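
# Illustration (added; not part of the original __init__). The _LazyModule pattern
# above defers importing heavy submodules until a symbol is first accessed. A rough
# standalone sketch of the idea, assuming a simple package layout:
#
#     import importlib
#     import types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # Map each exported symbol to the submodule that defines it.
#             self._symbol_to_module = {
#                 sym: mod for mod, syms in import_structure.items() for sym in syms
#             }
#
#         def __getattr__(self, item):
#             mod = self._symbol_to_module.get(item)
#             if mod is None:
#                 raise AttributeError(item)
#             module = importlib.import_module(f"{self.__name__}.{mod}")
#             return getattr(module, item)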
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = GPTSanJapaneseTokenizer
snake_case__ : int = False
snake_case__ : Tuple = {'do_clean_text': False, 'add_prefix_space': False}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowercase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowercase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.get_input_output_texts(__lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
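
# Usage sketch (added; not part of the test file). Outside the harness, the
# prefix-text feature exercised above looks like this; it requires downloading
# the Tanrei/GPTSAN-japanese checkpoint:
#
#     from transformers import GPTSanJapaneseTokenizer
#
#     tokenizer = GPTSanJapaneseTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
#     ids = tokenizer("は、", prefix_text="武田信玄").input_ids
#     print(tokenizer.decode(ids))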
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
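
# Quick self-check (added; not in the original script): x + 2y = 3 and
# 4x + 5y = 6 has the solution x = -1, y = 2.
if __name__ == "__main__":
    assert solve_simultaneous([[1, 2, 3], [4, 5, 6]]) == [-1.0, 2.0]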
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] ={"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ViTFeatureExtractor"""]
__lowerCAmelCase : List[str] =["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[int] , lowerCAmelCase__ :str ) -> list[int]:
'''simple docstring'''
lowercase = int(lowerCAmelCase__ )
# Initialize Result
lowercase = []
# Traverse through all denomination
for denomination in reversed(lowerCAmelCase__ ):
# Find denominations
while int(lowerCAmelCase__ ) >= int(lowerCAmelCase__ ):
total_value -= int(lowerCAmelCase__ )
answer.append(lowerCAmelCase__ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__lowerCAmelCase : str =[]
__lowerCAmelCase : Union[str, Any] ="""0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
__lowerCAmelCase : Optional[Any] =int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
__lowerCAmelCase : Optional[int] =input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
__lowerCAmelCase : Tuple =[1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
__lowerCAmelCase : int =input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F"""Following is minimal change for {value}: """)
__lowerCAmelCase : List[Any] =find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
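
# Caveat (added; not in the original script): the greedy strategy is optimal for
# canonical coin systems like the Indian denominations above, but not in general.
# With denominations [1, 3, 4] and a target of 6 it returns [4, 1, 1], although
# [3, 3] uses fewer coins.
if __name__ == "__main__":
    print(find_minimum_change([1, 3, 4], "6"))  # [4, 1, 1]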
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
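
# Usage sketch (added; not part of the original file). This reader backs
# Dataset.from_spark; driving it directly looks roughly like this, assuming a
# running SparkSession:
#
#     from pyspark.sql import SparkSession
#
#     spark = SparkSession.builder.getOrCreate()
#     df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#     ds = SparkDatasetReader(df, streaming=False).read()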
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = len(lowerCAmelCase__ )
lowercase = len(lowerCAmelCase__ )
lowercase = (
first_str_length if first_str_length > second_str_length else second_str_length
)
lowercase = []
for char_count in range(lowerCAmelCase__ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
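
# Quick self-check (added; not in the original script).
if __name__ == "__main__":
    assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"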
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(lowerCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCAmelCase__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
"""simple docstring"""
import numpy as np
def UpperCAmelCase__ ( lowerCAmelCase__ :np.array ) -> np.array:
'''simple docstring'''
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
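
# Quick self-check (added; not in the original script): the closed form above
# matches NumPy's built-in tanh.
if __name__ == "__main__":
    x = np.array([-2.0, 0.0, 2.0])
    assert np.allclose(tangent_hyperbolic(x), np.tanh(x))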
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
lowercase = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
lowercase = dct.pop(lowerCAmelCase__ )
lowercase = val
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
if "handwritten" in checkpoint_url:
lowercase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase__ )
lowercase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase = 1_0_2_4
lowercase = 4_0_9_6
lowercase = 2_4
lowercase = 1_6
lowercase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = False
lowercase = """relu"""
lowercase = 1_0_2_4
lowercase = True
lowercase = False
lowercase = False
# load HuggingFace model
lowercase = ViTModel(lowerCAmelCase__ , add_pooling_layer=lowerCAmelCase__ )
lowercase = TrOCRForCausalLM(lowerCAmelCase__ )
lowercase = VisionEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" , check_hash=lowerCAmelCase__ )["""model"""]
lowercase = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase = state_dict.pop(lowerCAmelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowercase = val
else:
lowercase = val
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image
lowercase = ViTImageProcessor(size=encoder_config.image_size )
lowercase = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowercase = TrOCRProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = processor(images=prepare_img(lowerCAmelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowercase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase = model(pixel_values=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
lowercase = outputs.logits
lowercase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
lowercase = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
lowercase = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase__ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__lowerCAmelCase : Dict =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
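
# Usage sketch (added; not part of the conversion script). After conversion, the
# dumped folder loads like any TrOCR checkpoint; the paths below are placeholders:
#
#     from PIL import Image
#     from transformers import TrOCRProcessor, VisionEncoderDecoderModel
#
#     processor = TrOCRProcessor.from_pretrained("path/to/dump")
#     model = VisionEncoderDecoderModel.from_pretrained("path/to/dump")
#     image = Image.open("line_of_handwriting.png").convert("RGB")
#     pixel_values = processor(images=image, return_tensors="pt").pixel_values
#     generated_ids = model.generate(pixel_values)
#     print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])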
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : List[str] ={
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
__lowerCAmelCase : Optional[Any] =[
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split(""".""" ):
lowercase = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
lowercase = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
lowercase = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
lowercase = value
elif weight_type == "weight_g":
lowercase = value
elif weight_type == "weight_v":
lowercase = value
elif weight_type == "bias":
lowercase = value
else:
lowercase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] ) -> str:
'''simple docstring'''
lowercase = []
lowercase = fairseq_model.state_dict()
lowercase = hf_model.feature_extractor
lowercase = hf_model.adapter
for name, value in fairseq_dict.items():
lowercase = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == """group""" , )
lowercase = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase = True
if "*" in mapped_key:
lowercase = name.split(lowerCAmelCase__ )[0].split(""".""" )[-2]
lowercase = mapped_key.replace("""*""" , lowerCAmelCase__ )
if "weight_g" in name:
lowercase = """weight_g"""
elif "weight_v" in name:
lowercase = """weight_v"""
elif "bias" in name:
lowercase = """bias"""
elif "weight" in name:
lowercase = """weight"""
else:
lowercase = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase = full_name.split("""conv_layers.""" )[-1]
lowercase = name.split(""".""" )
lowercase = int(items[0] )
lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
lowercase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
lowercase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
lowercase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
lowercase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
lowercase = full_name.split("""adaptor.""" )[-1]
lowercase = name.split(""".""" )
if items[1].isdigit():
lowercase = int(items[1] )
else:
lowercase = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
lowercase = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
lowercase = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
lowercase = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
lowercase = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
lowercase = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
lowercase = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
else:
unused_weights.append(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] ) -> Tuple:
'''simple docstring'''
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(lowerCAmelCase__ , lowerCAmelCase__ , bias=lowerCAmelCase__ )
lowercase = emb.weight.data
return lin_layer
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , ) -> List[Any]:
'''simple docstring'''
lowercase = WavaVecaConfig.from_pretrained(
lowerCAmelCase__ , add_adapter=lowerCAmelCase__ , adapter_stride=lowerCAmelCase__ , adapter_kernel_size=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , output_hidden_size=lowerCAmelCase__ , )
lowercase = MBartConfig.from_pretrained(lowerCAmelCase__ )
# load model
lowercase , lowercase , lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
lowercase = model[0].eval()
# load feature extractor
lowercase = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ )
# set weights for wav2vec2 encoder
lowercase = WavaVecaModel(lowerCAmelCase__ )
recursively_load_weights_wavaveca(model.encoder , lowerCAmelCase__ )
# load decoder weights
lowercase = MBartForCausalLM(lowerCAmelCase__ )
lowercase , lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=lowerCAmelCase__ )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
lowercase = SpeechEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
lowercase = False
lowercase = MBartaaTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(lowerCAmelCase__ )
lowercase = hf_wavavec.config.to_dict()
lowercase = tokenizer.pad_token_id
lowercase = tokenizer.bos_token_id
lowercase = tokenizer.eos_token_id
lowercase = """mbart50"""
lowercase = """wav2vec2"""
lowercase = tokenizer.eos_token_id
lowercase = 2_5_0_0_0_4
lowercase = tokenizer.eos_token_id
lowercase = SpeechEncoderDecoderConfig.from_dict(lowerCAmelCase__ )
hf_wavavec.save_pretrained(lowerCAmelCase__ )
feature_extractor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1_0_2_4, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=2_5_0_0_0_4, type=int, help="""`decoder_start_token_id` of model config""")
__lowerCAmelCase : List[Any] =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
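
# Usage sketch (added; not part of the conversion script). The exported folder is
# a SpeechEncoderDecoderModel checkpoint; loading it looks roughly like this,
# with placeholder paths and a 16 kHz waveform as a float array:
#
#     from transformers import SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor
#
#     model = SpeechEncoderDecoderModel.from_pretrained("path/to/dump")
#     feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("path/to/dump")
#     inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
#     generated_ids = model.generate(inputs.input_values)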
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : str = KandinskyInpaintPipeline
snake_case__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ : Optional[int] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ : Dict = False
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A__ ( self ):
"""simple docstring"""
return 100
@property
def A__ ( self ):
"""simple docstring"""
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(__lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def A__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
lowercase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 32
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
__lowerCAmelCase : Dict =list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
__lowerCAmelCase : Union[str, Any] =tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _A :
snake_case__ : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
snake_case__ : Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
snake_case__ : Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
snake_case__ : Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A folder containing the training data.'} )
snake_case__ : Optional[str] = field(default=lowerCAmelCase , metadata={'help': 'A folder containing the validation data.'} )
snake_case__ : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
snake_case__ : int = field(default=32 , metadata={'help': 'The size of the square patches to use for masking.'} )
snake_case__ : float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
snake_case__ : Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
snake_case__ : Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def A__ ( self ):
"""simple docstring"""
lowercase = {}
if self.train_dir is not None:
lowercase = self.train_dir
if self.validation_dir is not None:
lowercase = self.validation_dir
lowercase = data_files if data_files else None
@dataclass
class _A :
snake_case__ : str = field(
default=lowerCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
snake_case__ : Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(lowerCAmelCase )} , )
snake_case__ : Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
snake_case__ : Optional[str] = field(
default=lowerCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
snake_case__ : Optional[str] = field(
default=lowerCAmelCase , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
snake_case__ : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
snake_case__ : str = field(default=lowerCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
snake_case__ : Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
snake_case__ : Optional[int] = field(
default=lowerCAmelCase , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
snake_case__ : Optional[int] = field(
default=lowerCAmelCase , metadata={'help': 'Stride to use for the encoder.'} , )
class _A :
def __init__( self , __lowerCAmelCase=192 , __lowerCAmelCase=32 , __lowerCAmelCase=4 , __lowerCAmelCase=0.6 ):
"""simple docstring"""
lowercase = input_size
lowercase = mask_patch_size
lowercase = model_patch_size
lowercase = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("""Input size must be divisible by mask patch size""" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("""Mask patch size must be divisible by model patch size""" )
lowercase = self.input_size // self.mask_patch_size
lowercase = self.mask_patch_size // self.model_patch_size
lowercase = self.rand_size**2
lowercase = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self ):
"""simple docstring"""
lowercase = np.random.permutation(self.token_count )[: self.mask_count]
lowercase = np.zeros(self.token_count , dtype=__lowerCAmelCase )
lowercase = 1
lowercase = mask.reshape((self.rand_size, self.rand_size) )
lowercase = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
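def _demo_mask_generator():
    """Illustrative usage sketch, not part of the original script. With the
    default arguments above (input_size=192, mask_patch_size=32,
    model_patch_size=4, mask_ratio=0.6), the generator picks 22 of the 36
    maskable 32x32 patches and broadcasts each choice down to the finer grid
    of 4x4 model patches."""
    generator = MaskGenerator()
    mask = generator()
    # one flag per model patch: (192 // 4) ** 2 == 2304 entries, 0 = visible, 1 = masked
    assert mask.shape == (2304,)
    return mask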
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.stack([example["""pixel_values"""] for example in examples] )
lowercase = torch.stack([example["""mask"""] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def UpperCAmelCase__ ( ) -> Any:
'''simple docstring'''
lowercase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mim""" , lowerCAmelCase__ , lowerCAmelCase__ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowercase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
lowercase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , lowerCAmelCase__ ) and data_args.train_val_split > 0.0:
lowercase = ds["""train"""].train_test_split(data_args.train_val_split )
lowercase = split["""train"""]
lowercase = split["""test"""]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
lowercase = AutoConfig.from_pretrained(model_args.config_name_or_path , **lowerCAmelCase__ )
elif model_args.model_name_or_path:
lowercase = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase__ )
else:
lowercase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(lowerCAmelCase__ , """decoder_type""" ):
lowercase = """simmim"""
# adapt config
lowercase = model_args.image_size if model_args.image_size is not None else config.image_size
lowercase = model_args.patch_size if model_args.patch_size is not None else config.patch_size
lowercase = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"""image_size""": model_args.image_size,
"""patch_size""": model_args.patch_size,
"""encoder_stride""": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
lowercase = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **lowerCAmelCase__ )
elif model_args.model_name_or_path:
lowercase = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase__ )
else:
lowercase = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
lowercase = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
lowercase = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
lowercase = AutoModelForMaskedImageModeling.from_config(lowerCAmelCase__ )
if training_args.do_train:
lowercase = ds["""train"""].column_names
else:
lowercase = ds["""validation"""].column_names
if data_args.image_column_name is not None:
lowercase = data_args.image_column_name
elif "image" in column_names:
lowercase = """image"""
elif "img" in column_names:
lowercase = """img"""
else:
lowercase = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
lowercase = Compose(
[
Lambda(lambda lowerCAmelCase__ : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
lowercase = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(lowerCAmelCase__ :Union[str, Any] ):
lowercase = [transforms(lowerCAmelCase__ ) for image in examples[image_column_name]]
lowercase = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
lowercase = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(lowerCAmelCase__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
lowercase = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(lowerCAmelCase__ )
# Initialize our trainer
lowercase = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , )
# Training
if training_args.do_train:
lowercase = None
if training_args.resume_from_checkpoint is not None:
lowercase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase = last_checkpoint
lowercase = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase = trainer.evaluate()
trainer.log_metrics("""eval""" , lowerCAmelCase__ )
trainer.save_metrics("""eval""" , lowerCAmelCase__ )
# Write model card and (optionally) push to hub
lowercase = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """masked-image-modeling""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-image-modeling"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 32
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to use SortishSampler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 32
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__lowerCAmelCase : Any ={
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : int =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
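# Illustrative note, not part of the original module: with the _LazyModule
# above, a statement such as `from transformers import SpeechT5Processor` only
# imports the underlying processing submodule on first attribute access, so
# `import transformers` itself stays cheap.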
| 32
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowercase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowercase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowercase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowercase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowercase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowercase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowercase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowercase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowercase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowercase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowercase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowercase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowercase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowercase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowercase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowercase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowercase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowercase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowercase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowercase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowercase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowercase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowercase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase , lowercase = int(key_split[2] ), int(key_split[4] )
lowercase = config.vision_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.text_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowercase = val.squeeze_()
else:
lowercase = val
return orig_state_dict
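def _demo_split_fused_qkv(fused_weight, hidden_size):
    """Illustrative helper, not part of the original script: the same slicing
    used above, splitting a fused qkv projection of shape
    (3 * hidden_size, hidden_size) into separate query/key/value matrices."""
    q = fused_weight[:hidden_size, :]
    k = fused_weight[hidden_size : hidden_size * 2, :]
    v = fused_weight[-hidden_size:, :]
    return q, k, v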
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int="groupvit-gcc-yfcc" , lowerCAmelCase__ :List[Any]=False ) -> str:
'''simple docstring'''
lowercase = GroupViTConfig()
lowercase = GroupViTModel(lowerCAmelCase__ ).eval()
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowercase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowercase = prepare_img()
lowercase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowercase = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print("""Successfully saved processor and model to""" , lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__lowerCAmelCase : int =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 32
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _A ( lowerCAmelCase ):
@staticmethod
@abstractmethod
def A__ ( __lowerCAmelCase ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def A__ ( self ):
"""simple docstring"""
raise NotImplementedError()
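# Illustrative sketch, not from the source: a concrete command subclasses the
# ABC above, registers its own argparse sub-parser in the static method and
# does the actual work in the second method, e.g.:
#
#   class EnvCommand(BaseTransformersCLICommand):
#       @staticmethod
#       def register_subcommand(parser: ArgumentParser):
#           parser.add_parser("env").set_defaults(func=lambda args: EnvCommand())
#       def run(self):
#           print("collecting environment info...")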
| 32
|
"""simple docstring"""
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = None
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(sources , int ):
lowercase = [sources]
if isinstance(sinks , int ):
lowercase = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
lowercase = sources[0]
lowercase = sinks[0]
# make a fake vertex if there is more
# than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase = max_input_flow
lowercase = 0
lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase = max_input_flow
lowercase = size - 1
def A__ ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = algorithm(self )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = flow_network
lowercase = flow_network.verticesCount
lowercase = flow_network.sourceIndex
lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms; make a deep copy before doing that
lowercase = flow_network.graph
lowercase = False
def A__ ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase = True
def A__ ( self ):
"""simple docstring"""
pass
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
# use this to save your result
lowercase = -1
def A__ ( self ):
"""simple docstring"""
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase = [0] * self.verticies_count
lowercase = [0] * self.verticies_count
def A__ ( self ):
"""simple docstring"""
lowercase = self.verticies_count
# push some substance into the graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase = 0
while i < len(__lowerCAmelCase ):
lowercase = vertices_list[i]
lowercase = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
lowercase = 0
else:
i += 1
lowercase = sum(self.preflow[self.source_index] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's a neighbour and the current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase = self.heights[to_index]
if min_height is not None:
lowercase = min_height + 1
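# Note (illustrative): relabel raises the vertex's height to one more than its
# lowest neighbour that still has residual capacity, which is exactly what the
# `min_height + 1` assignment above implements.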
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32
| 1
|
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=13 , __lowerCAmelCase=30 , __lowerCAmelCase=2 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=True , __lowerCAmelCase=32 , __lowerCAmelCase=5 , __lowerCAmelCase=4 , __lowerCAmelCase=37 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=10 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=3 , __lowerCAmelCase=None , __lowerCAmelCase=2 , ):
"""simple docstring"""
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = is_training
lowercase = use_labels
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = scope
lowercase = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
lowercase = (image_size // patch_size) ** 2
lowercase = num_patches + 2
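# e.g. with the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 227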
def A__ ( self ):
"""simple docstring"""
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = DeiTModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = DeiTForMaskedImageModeling(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase = 1
lowercase = DeiTForMaskedImageModeling(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase = model(__lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.type_sequence_label_size
lowercase = DeiTForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase = 1
lowercase = DeiTForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase = model(__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ):
"""simple docstring"""
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
snake_case__ : List[str] = False
snake_case__ : int = False
snake_case__ : Tuple = False
def A__ ( self ):
"""simple docstring"""
lowercase = DeiTModelTester(self )
lowercase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase , hidden_size=37 )
def A__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DeiT does not use inputs_embeds""" )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
"""simple docstring"""
lowercase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__lowerCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
lowercase = model(**__lowerCAmelCase ).loss
loss.backward()
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase = False
lowercase = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase = model_class(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(__lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
lowercase = model(**__lowerCAmelCase ).loss
loss.backward()
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__lowerCAmelCase ),
*get_values(__lowerCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
lowercase = problem_type["""title"""]
lowercase = problem_type["""num_labels"""]
lowercase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
lowercase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if problem_type["num_labels"] > 1:
lowercase = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowercase = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__lowerCAmelCase ) as warning_list:
lowercase = model(**__lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def A__ ( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = DeiTModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def UpperCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def A__ ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""" )
if is_vision_available()
else None
)
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = DeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""" ).to(
__lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
lowercase = model(**__lowerCAmelCase )
# verify the logits
lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCAmelCase )
lowercase = torch.tensor([-1.0_2_6_6, 0.1_9_1_2, -1.2_8_6_1] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self ):
"""simple docstring"""
lowercase = DeiTModel.from_pretrained(
"""facebook/deit-base-distilled-patch16-224""" , torch_dtype=torch.floataa , device_map="""auto""" )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=__lowerCAmelCase , return_tensors="""pt""" )
lowercase = inputs.pixel_values.to(__lowerCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase = model(__lowerCAmelCase )
| 32
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
__lowerCAmelCase : Dict =tf.data.AUTOTUNE
def UpperCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
lowercase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCAmelCase__ , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCAmelCase__ , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
lowercase = parser.parse_args()
return args
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
try:
if args.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowerCAmelCase__ )
tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase__ )
return tpu
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = 0
for file in file_list:
lowercase = file.split("""/""" )[-1]
lowercase = re.search(R"""-\d+-(\d+)\.tfrecord""" , lowerCAmelCase__ ).group(1 )
lowercase = int(lowerCAmelCase__ )
num_samples += sample_count
return num_samples
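def _demo_count_samples():
    """Illustrative sketch with a hypothetical shard name that follows the
    naming scheme the regex above expects: the trailing number group encodes
    the shard's sample count."""
    return count_samples(["gs://bucket/train-00000-01024.tfrecord"])  # -> 1024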
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=None ) -> List[Any]:
'''simple docstring'''
lowercase = count_samples(lowerCAmelCase__ )
lowercase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ )
if shuffle:
lowercase = dataset.shuffle(len(lowerCAmelCase__ ) )
lowercase = tf.data.TFRecordDataset(lowerCAmelCase__ , num_parallel_reads=lowerCAmelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase = dataset.shuffle(args.shuffle_buffer_size )
lowercase = dataset.batch(lowerCAmelCase__ , drop_remainder=lowerCAmelCase__ )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
lowercase = dataset.prefetch(lowerCAmelCase__ )
return dataset
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
if not args.no_tpu:
lowercase = initialize_tpu(lowerCAmelCase__ )
lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ )
else:
lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloataa:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase = tokenizer.vocab_size
lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
lowercase = count_samples(lowerCAmelCase__ )
lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase = steps_per_epoch * args.num_epochs
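# e.g. (illustrative numbers): 1_000_000 training samples with
# per_replica_batch_size=8 on an 8-replica TPU give a global batch of 64 and
# 15_625 optimizer steps per epoch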
with strategy.scope():
lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase = create_optimizer(
num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase__ , metrics=["""accuracy"""] )
def decode_fn(lowerCAmelCase__ :Any ):
lowercase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors="""tf""" )
def mask_with_collator(lowerCAmelCase__ :Dict ):
# TF really needs an isin() function
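        # Mark positions that must never be selected for MLM masking: padding, [CLS] and [SEP] tokens.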
lowercase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
lowercase , lowercase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowerCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase__ , )
return batch
lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
lowercase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
model.fit(
lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =parse_args()
main(args)
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
__lowerCAmelCase : int ="""bert-base-cased"""
__lowerCAmelCase : Dict ="""google/pegasus-xsum"""
__lowerCAmelCase : str =[""" Sam ate lunch today.""", """Sams lunch ingredients."""]
__lowerCAmelCase : Optional[Any] =["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
__lowerCAmelCase : Optional[Any] ="""patrickvonplaten/t5-tiny-random"""
__lowerCAmelCase : List[Any] ="""sshleifer/bart-tiny-random"""
__lowerCAmelCase : Union[str, Any] ="""sshleifer/tiny-mbart"""
__lowerCAmelCase : List[str] ="""sshleifer/tiny-marian-en-de"""
def UpperCAmelCase__ ( lowerCAmelCase__ :Path , lowerCAmelCase__ :list ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """\n""".join(lowerCAmelCase__ )
Path(lowerCAmelCase__ ).open("""w""" ).writelines(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCAmelCase__ , f'{split}.source' ) , lowerCAmelCase__ )
_dump_articles(os.path.join(lowerCAmelCase__ , f'{split}.target' ) , lowerCAmelCase__ )
return tmp_dir
class _A ( lowerCAmelCase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase = max(len(tokenizer.encode(__lowerCAmelCase ) ) for a in ARTICLES )
lowercase = max(len(tokenizer.encode(__lowerCAmelCase ) ) for a in SUMMARIES )
lowercase = 4
lowercase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowercase , lowercase = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
lowercase = SeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path="""train""" , max_source_length=__lowerCAmelCase , max_target_length=__lowerCAmelCase , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase , )
lowercase = DataLoader(__lowerCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowercase = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
lowercase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowercase = max(len(tokenizer.encode(__lowerCAmelCase ) ) for a in ARTICLES )
lowercase = max(len(tokenizer.encode(__lowerCAmelCase ) ) for a in SUMMARIES )
lowercase = 4
lowercase = LegacySeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path="""train""" , max_source_length=20 , max_target_length=__lowerCAmelCase , )
lowercase = DataLoader(__lowerCAmelCase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def A__ ( self ):
"""simple docstring"""
lowercase = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowercase = tmp_dir.joinpath("""train.source""" ).open().readlines()
lowercase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(__lowerCAmelCase , __lowerCAmelCase , 128 , __lowerCAmelCase )
lowercase = {x.name for x in tmp_dir.iterdir()}
lowercase = {x.name for x in save_dir.iterdir()}
lowercase = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(__lowerCAmelCase ) < len(__lowerCAmelCase )
assert len(__lowerCAmelCase ) == 1
assert len(packed_examples[0] ) == sum(len(__lowerCAmelCase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def A__ ( self ):
"""simple docstring"""
if not FAIRSEQ_AVAILABLE:
return
lowercase , lowercase , lowercase = self._get_dataset(max_len=64 )
lowercase = 64
lowercase = ds.make_dynamic_sampler(__lowerCAmelCase , required_batch_size_multiple=__lowerCAmelCase )
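        # Dynamic batching groups examples so each batch holds roughly max_tokens source tokens in total.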
lowercase = [len(__lowerCAmelCase ) for x in batch_sampler]
assert len(set(__lowerCAmelCase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(__lowerCAmelCase ) == len(__lowerCAmelCase ) # no dropped or added examples
lowercase = DataLoader(__lowerCAmelCase , batch_sampler=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
lowercase = []
lowercase = []
for batch in data_loader:
lowercase = batch["""input_ids"""].shape
lowercase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            lowercase = np.prod(batch["""input_ids"""].shape )
num_src_per_batch.append(__lowerCAmelCase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(__lowerCAmelCase )
assert num_src_per_batch[0] == max(__lowerCAmelCase )
if failures:
raise AssertionError(f'too many tokens in {len(__lowerCAmelCase )} batches' )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase , lowercase = self._get_dataset(max_len=512 )
lowercase = 2
lowercase = ds.make_sortish_sampler(__lowerCAmelCase , shuffle=__lowerCAmelCase )
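        # The sortish sampler roughly orders examples by length, so batches need far less padding.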
lowercase = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 )
lowercase = DataLoader(__lowerCAmelCase , batch_size=__lowerCAmelCase , collate_fn=ds.collate_fn , num_workers=2 , sampler=__lowerCAmelCase )
lowercase = tokenizer.pad_token_id
def count_pad_tokens(__lowerCAmelCase , __lowerCAmelCase="input_ids" ):
return [batch[k].eq(__lowerCAmelCase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(__lowerCAmelCase , k="""labels""" ) ) < sum(count_pad_tokens(__lowerCAmelCase , k="""labels""" ) )
assert sum(count_pad_tokens(__lowerCAmelCase ) ) < sum(count_pad_tokens(__lowerCAmelCase ) )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase=1000 , __lowerCAmelCase=128 ):
"""simple docstring"""
if os.getenv("""USE_REAL_DATA""" , __lowerCAmelCase ):
lowercase = """examples/seq2seq/wmt_en_ro"""
lowercase = max_len * 2 * 64
if not Path(__lowerCAmelCase ).joinpath("""train.len""" ).exists():
save_len_file(__lowerCAmelCase , __lowerCAmelCase )
else:
lowercase = """examples/seq2seq/test_data/wmt_en_ro"""
lowercase = max_len * 4
save_len_file(__lowerCAmelCase , __lowerCAmelCase )
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase )
lowercase = SeqaSeqDataset(
__lowerCAmelCase , data_dir=__lowerCAmelCase , type_path="""train""" , max_source_length=__lowerCAmelCase , max_target_length=__lowerCAmelCase , n_obs=__lowerCAmelCase , )
return ds, max_tokens, tokenizer
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase , lowercase = self._get_dataset()
lowercase = set(DistributedSortishSampler(__lowerCAmelCase , 256 , num_replicas=2 , rank=0 , add_extra_examples=__lowerCAmelCase ) )
lowercase = set(DistributedSortishSampler(__lowerCAmelCase , 256 , num_replicas=2 , rank=1 , add_extra_examples=__lowerCAmelCase ) )
assert idsa.intersection(__lowerCAmelCase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = AutoTokenizer.from_pretrained(__lowerCAmelCase , use_fast=__lowerCAmelCase )
if tok_name == MBART_TINY:
lowercase = SeqaSeqDataset(
__lowerCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
lowercase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowercase = SeqaSeqDataset(
__lowerCAmelCase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
lowercase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(__lowerCAmelCase ) == 1 if tok_name == BART_TINY else len(__lowerCAmelCase ) == 0
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
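# Lazy import structure: submodules are only imported when first accessed via the _LazyModule below.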
__lowerCAmelCase : List[Any] ={
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
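    # vision encoder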
if "img_encoder.pos_embed" in name:
lowercase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowercase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowercase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowercase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowercase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowercase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowercase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowercase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowercase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowercase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowercase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowercase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowercase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowercase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowercase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowercase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowercase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowercase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowercase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowercase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowercase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowercase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowercase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
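    # iterate over a copy of the keys so entries can be popped from the dict while renaming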
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase , lowercase = int(key_split[2] ), int(key_split[4] )
lowercase = config.vision_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.text_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[
dim : dim * 2, :
]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowercase = val.squeeze_()
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int="groupvit-gcc-yfcc" , lowerCAmelCase__ :List[Any]=False ) -> str:
'''simple docstring'''
lowercase = GroupViTConfig()
lowercase = GroupViTModel(lowerCAmelCase__ ).eval()
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowercase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowercase = prepare_img()
lowercase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowercase = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print("""Successfully saved processor and model to""" , lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__lowerCAmelCase : int =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'mask2former'
snake_case__ : Union[str, Any] = ['swin']
snake_case__ : Any = {'hidden_size': 'hidden_dim'}
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 1024 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 6 , __lowerCAmelCase = 10 , __lowerCAmelCase = 8 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 2048 , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 4 , __lowerCAmelCase = 255 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1_2544 , __lowerCAmelCase = 3.0 , __lowerCAmelCase = 0.7_5 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = [4, 8, 16, 32] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
lowercase = backbone_config
lowercase = feature_size
lowercase = mask_feature_size
lowercase = hidden_dim
lowercase = encoder_feedforward_dim
lowercase = activation_function
lowercase = encoder_layers
lowercase = decoder_layers
lowercase = num_attention_heads
lowercase = dropout
lowercase = dim_feedforward
lowercase = pre_norm
lowercase = enforce_input_projection
lowercase = common_stride
lowercase = ignore_value
lowercase = num_queries
lowercase = no_object_weight
lowercase = class_weight
lowercase = mask_weight
lowercase = dice_weight
lowercase = train_num_points
lowercase = oversample_ratio
lowercase = importance_sample_ratio
lowercase = init_std
lowercase = init_xavier_std
lowercase = use_auxiliary_loss
lowercase = feature_strides
lowercase = output_auxiliary_logits
lowercase = decoder_layers
super().__init__(**__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return cls(
backbone_config=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : int ={
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class _A ( lowerCAmelCase ):
snake_case__ : List[Any] = 'deit'
def __init__( self , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=224 , __lowerCAmelCase=16 , __lowerCAmelCase=3 , __lowerCAmelCase=True , __lowerCAmelCase=16 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
lowercase = encoder_stride
class _A ( lowerCAmelCase ):
snake_case__ : Any = version.parse('1.11' )
@property
def A__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self ):
"""simple docstring"""
return 1E-4
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = s.rsplit(lowerCAmelCase__ , lowerCAmelCase__ )
return new.join(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
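    # rename DALL-E encoder keys to the layout expected by FlavaImageCodebook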
lowercase = {}
lowercase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowercase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowercase = rreplace(lowerCAmelCase__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowercase = rreplace(lowerCAmelCase__ , """.b""" , """.bias""" , 1 )
lowercase = value.float()
return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=True ) -> Any:
'''simple docstring'''
from dall_e import Encoder
lowercase = Encoder()
if os.path.exists(lowerCAmelCase__ ):
lowercase = torch.load(lowerCAmelCase__ )
else:
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase__ )
if config_path is not None:
lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = FlavaImageCodebookConfig()
lowercase = FlavaImageCodebook(lowerCAmelCase__ ).eval()
lowercase = encoder.state_dict()
lowercase = upgrade_state_dict(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
lowercase = hf_model.state_dict()
lowercase = count_parameters(lowerCAmelCase__ )
lowercase = count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase : Any =parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCAmelCase : Tuple =False
@skip_mps
class _A ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[Any] = StableDiffusionAttendAndExcitePipeline
snake_case__ : Optional[Any] = False
snake_case__ : Dict = TEXT_TO_IMAGE_PARAMS
snake_case__ : int = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
snake_case__ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case__ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCAmelCase , )
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase = CLIPTextModel(__lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
        lowercase = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowercase = np.array(
[0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
lowercase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCAmelCase , 1E-3 )
def A__ ( self ):
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def A__ ( self ):
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A__ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def A__ ( self ):
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def A__ ( self ):
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def A__ ( self ):
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5E-4 )
def A__ ( self ):
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class _A ( unittest.TestCase ):
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def A__ ( cls ):
"""simple docstring"""
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = torch.manual_seed(51 )
lowercase = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=__lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowercase = """a painting of an elephant with glasses"""
lowercase = [5, 7]
lowercase = pipe(
prompt=__lowerCAmelCase , token_indices=__lowerCAmelCase , guidance_scale=7.5 , generator=__lowerCAmelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5E-1
"""simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
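    # emit an ANSI escape sequence that moves the cursor num_lines steps in the given direction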
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
__lowerCAmelCase : List[Any] ="""src/diffusers"""
__lowerCAmelCase : Any ="""."""
# This is to make sure the diffusers module imported is the one in the repo.
__lowerCAmelCase : Tuple =importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
__lowerCAmelCase : Dict =spec.loader.load_module()
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple ) -> Tuple:
'''simple docstring'''
return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , lowerCAmelCase__ ) is not None
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] ) -> Any:
'''simple docstring'''
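    # Resolve a dotted object name (e.g. "models.unet_2d.UNet2DModel") to that object's source code in the repo.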
lowercase = object_name.split(""".""" )
lowercase = 0
# First let's find the module where our object lives.
lowercase = parts[i]
while i < len(lowerCAmelCase__ ) and not os.path.isfile(os.path.join(lowerCAmelCase__ , f'{module}.py' ) ):
i += 1
if i < len(lowerCAmelCase__ ):
lowercase = os.path.join(lowerCAmelCase__ , parts[i] )
if i >= len(lowerCAmelCase__ ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(lowerCAmelCase__ , f'{module}.py' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase = f.readlines()
# Now let's find the class / func in the code!
lowercase = """"""
lowercase = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCAmelCase__ ) and re.search(Rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowercase = line_index
while line_index < len(lowerCAmelCase__ ) and _should_continue(lines[line_index] , lowerCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowercase = lines[start_index:line_index]
return "".join(lowerCAmelCase__ )
__lowerCAmelCase : Any =re.compile(R"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
__lowerCAmelCase : Union[str, Any] =re.compile(R"""^\s*(\S+)->(\S+)(\s+.*|$)""")
__lowerCAmelCase : Tuple =re.compile(R"""<FILL\s+[^>]*>""")
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> Any:
'''simple docstring'''
lowercase = code.split("""\n""" )
lowercase = 0
while idx < len(lowerCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCAmelCase__ ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[Any]:
'''simple docstring'''
lowercase = len(get_indent(lowerCAmelCase__ ) ) > 0
if has_indent:
lowercase = f'class Bla:\n{code}'
lowercase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_1_9 , preview=lowerCAmelCase__ )
lowercase = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
lowercase , lowercase = style_docstrings_in_code(lowerCAmelCase__ )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
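    # Scan the file for "# Copied from diffusers.xxx" markers and check each copy against its source.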
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase = f.readlines()
lowercase = []
lowercase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCAmelCase__ ):
lowercase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowercase , lowercase , lowercase = search.groups()
lowercase = find_code_in_diffusers(lowerCAmelCase__ )
lowercase = get_indent(lowerCAmelCase__ )
lowercase = line_index + 1 if indent == theoretical_indent else line_index + 2
lowercase = theoretical_indent
lowercase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
lowercase = True
while line_index < len(lowerCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
break
lowercase = lines[line_index]
lowercase = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(f'^{indent}# End copy' , lowerCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
lowercase = lines[start_index:line_index]
lowercase = """""".join(lowerCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
        lowercase = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(line ) is None]
lowercase = """\n""".join(lowerCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCAmelCase__ ) > 0:
lowercase = replace_pattern.replace("""with""" , """""" ).split(""",""" )
lowercase = [_re_replace_pattern.search(lowerCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowercase , lowercase , lowercase = pattern.groups()
lowercase = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if option.strip() == "all-casing":
lowercase = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ )
lowercase = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowercase = blackify(lines[start_index - 1] + theoretical_code )
lowercase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
lowercase = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowercase = start_index + 1
if overwrite and len(lowerCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowerCAmelCase__ )
return diffs
def UpperCAmelCase__ ( lowerCAmelCase__ :bool = False ) -> Union[str, Any]:
'''simple docstring'''
lowercase = glob.glob(os.path.join(lowerCAmelCase__ , """**/*.py""" ) , recursive=lowerCAmelCase__ )
lowercase = []
for filename in all_files:
lowercase = is_copy_consistent(lowerCAmelCase__ , lowerCAmelCase__ )
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowerCAmelCase__ ) > 0:
lowercase = """\n""".join(lowerCAmelCase__ )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
__lowerCAmelCase : Optional[Any] =parser.parse_args()
check_copies(args.fix_and_overwrite)
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
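    # returns the largest number obtainable by deleting exactly one digit, e.g. 1253 -> 253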
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""only integers accepted as input""" )
else:
        lowercase = str(abs(lowerCAmelCase__ ) )
        lowercase = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int("""""".join(list(transposition ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
"""simple docstring"""
from __future__ import annotations
import requests
__lowerCAmelCase : Dict =set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :str = "new" , lowerCAmelCase__ :list | None = None ) -> dict:
'''simple docstring'''
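    # fetch posts from the subreddit's public JSON endpoint, keeping only the requested fields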
lowercase = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(lowerCAmelCase__ ) - valid_terms ) ):
lowercase = f'Invalid search term: {invalid_search_terms}'
raise ValueError(lowerCAmelCase__ )
lowercase = requests.get(
f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"""User-agent""": """A random string"""} , )
if response.status_code == 4_2_9:
raise requests.HTTPError
lowercase = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(lowerCAmelCase__ )}
lowercase = {}
for id_ in range(lowerCAmelCase__ ):
lowercase = {
item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
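    # replace every segment by four: trisect it and add an outward 60-degree "bump" in the middle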
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray:
'''simple docstring'''
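    # standard 2D rotation: multiply the vector by [[cos, -sin], [sin, cos]]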
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( lowerCAmelCase ):
snake_case__ : Union[str, Any] = (DDPMScheduler,)
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__lowerCAmelCase )
return config
def A__ ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__lowerCAmelCase , beta_end=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
self.check_over_configs(thresholding=__lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__lowerCAmelCase , prediction_type=__lowerCAmelCase , sample_max_value=__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for t in [0, 500, 999]:
self.check_over_forward(time_step=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
lowercase = torch.manual_seed(0 )
for t in reversed(range(__lowerCAmelCase ) ):
# 1. predict noise residual
lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase = pred_prev_sample
lowercase = torch.sum(torch.abs(__lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
lowercase = torch.manual_seed(0 )
for t in reversed(range(__lowerCAmelCase ) ):
# 1. predict noise residual
lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
# 2. predict previous mean of sample x_t-1
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase = pred_prev_sample
lowercase = torch.sum(torch.abs(__lowerCAmelCase ) )
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
lowercase = scheduler.timesteps
for i, timestep in enumerate(__lowerCAmelCase ):
if i == len(__lowerCAmelCase ) - 1:
lowercase = -1
else:
lowercase = timesteps[i + 1]
lowercase = scheduler.previous_timestep(__lowerCAmelCase )
lowercase = prev_t.item()
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = [100, 87, 50, 51, 0]
with self.assertRaises(__lowerCAmelCase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = [100, 87, 50, 1, 0]
lowercase = len(__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__lowerCAmelCase , timesteps=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__lowerCAmelCase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=__lowerCAmelCase )
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = credit_card_number
lowercase = 0
lowercase = len(lowerCAmelCase__ ) - 2
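    # walk right-to-left, starting from the second-to-last digit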
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
lowercase = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 1_0
digit += 1
lowercase = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(lowerCAmelCase__ ) <= 1_6:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
if is_vision_available():
import PIL
class _A ( lowerCAmelCase ):
snake_case__ : Union[str, Any] = ['pixel_values']
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = size if size is not None else {"""shortest_edge""": 224}
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
lowercase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase , param_name="""crop_size""" )
lowercase = do_resize
lowercase = size
lowercase = resample
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowercase = image_std if image_std is not None else OPENAI_CLIP_STD
lowercase = do_convert_rgb
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowercase = get_resize_output_image_size(__lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCAmelCase )
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = size if size is not None else self.size
lowercase = get_size_dict(__lowerCAmelCase , param_name="""size""" , default_to_square=__lowerCAmelCase )
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" , default_to_square=__lowerCAmelCase )
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowercase = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowercase = [convert_to_rgb(__lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
lowercase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
lowercase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
lowercase = {"""pixel_values""": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
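# (added summary: the preprocessing above follows the CLIP recipe: optional RGB
# conversion, resize to `shortest_edge`, center crop, rescale by 1/255, then
# normalization with the OPENAI_CLIP mean/std, returned as a BatchFeature)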
| 32
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
def extract(*__lowerCAmelCase , **__lowerCAmelCase ):
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = torch.ones([0] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# assemble the pipeline (this test uses the DDIM scheduler configured above)
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(pipe.scheduler , __lowerCAmelCase )
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowercase = 40_0366_0346
lowercase = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
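# (the `sld_*` kwargs configure Safe Latent Diffusion: overall safety-guidance
# strength, warmup steps before the guidance kicks in, an activation threshold,
# and momentum scale/beta terms)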
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowercase = 27_3497_1755
lowercase = 7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowercase = 10_4435_5234
lowercase = 12
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
__lowerCAmelCase : List[Any] =typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
__lowerCAmelCase : int =typing.Union[np.floataa, int, float] # noqa: UP007
def UpperCAmelCase__ ( lowerCAmelCase__ :Vector , lowerCAmelCase__ :Vector ) -> VectorOut:
'''simple docstring'''
return np.sqrt(np.sum((np.asarray(lowerCAmelCase__ ) - np.asarray(lowerCAmelCase__ )) ** 2 ) )
def UpperCAmelCase__ ( lowerCAmelCase__ :Vector , lowerCAmelCase__ :Vector ) -> VectorOut:
'''simple docstring'''
return sum((va - va) ** 2 for va, va in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) ** (1 / 2)
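# Quick sanity check for the two distance functions above (a hand-checkable
# 3-4-5 triangle): the distance from [0, 0] to [3, 4] is sqrt(9 + 16) = 5.0,
# and the numpy and pure-Python variants should agree on it.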
if __name__ == "__main__":
def UpperCAmelCase__ ( ) -> None:
'''simple docstring'''
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=1_0_0_0_0 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=1_0_0_0_0 , globals=globals() , ) )
benchmark()
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
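# (added note: each pass normalizes every row by its leading coefficient and
# subtracts the first row to eliminate the first column, i.e. one step of
# Gaussian elimination, applied recursively below)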
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
# If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
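# Hand-checkable example of the solver above: [[4, 2]] encodes the single
# equation 4x = 2, so solve_simultaneous([[4, 2]]) returns [0.5], matching the
# demo call in the __main__ block below.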
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 32
| 1
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=2 , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=10 , __lowerCAmelCase=3 , __lowerCAmelCase=32 * 8 , __lowerCAmelCase=32 * 8 , __lowerCAmelCase=4 , __lowerCAmelCase=64 , ):
"""simple docstring"""
lowercase = parent
lowercase = batch_size
lowercase = is_training
lowercase = use_auxiliary_loss
lowercase = num_queries
lowercase = num_channels
lowercase = min_size
lowercase = max_size
lowercase = num_labels
lowercase = hidden_dim
lowercase = hidden_dim
def A__ ( self ):
"""simple docstring"""
lowercase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__lowerCAmelCase )
lowercase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowerCAmelCase )
lowercase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowerCAmelCase ) > 0.5
).float()
lowercase = (torch.rand((self.batch_size, self.num_labels) , device=__lowerCAmelCase ) > 0.5).long()
lowercase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def A__ ( self ):
"""simple docstring"""
lowercase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowercase = self.num_queries
lowercase = self.num_labels
lowercase = [1, 1, 1, 1]
lowercase = self.num_channels
lowercase = 64
lowercase = 128
lowercase = self.hidden_dim
lowercase = self.hidden_dim
lowercase = self.hidden_dim
return config
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase , lowercase , lowercase , lowercase = self.prepare_config_and_inputs()
lowercase = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = output.encoder_hidden_states
lowercase = output.pixel_decoder_hidden_states
lowercase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__lowerCAmelCase ) , config.decoder_layers )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
"""simple docstring"""
with torch.no_grad():
lowercase = MaskaFormerModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
lowercase = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
lowercase = model(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = MaskaFormerForUniversalSegmentation(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
def comm_check_on_output(__lowerCAmelCase ):
# let's still check that all the required outputs are present
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
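# (concretely: with min_size = max_size = 32 * 8 = 256 in the tester above, the
# predicted masks come out at 64 x 64)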
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowercase = model(pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase )
lowercase = model(__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
lowercase = model(
pixel_values=__lowerCAmelCase , pixel_mask=__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
comm_check_on_output(__lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case__ : str = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
snake_case__ : Dict = False
snake_case__ : str = False
snake_case__ : Any = False
snake_case__ : List[Any] = False
def A__ ( self ):
"""simple docstring"""
lowercase = MaskaFormerModelTester(self )
lowercase = ConfigTester(self , config_class=__lowerCAmelCase , has_text_modality=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__lowerCAmelCase )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def A__ ( self ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowercase = MaskaFormerModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = (self.model_tester.min_size,) * 2
lowercase = {
"""pixel_values""": torch.randn((2, 3, *size) , device=__lowerCAmelCase ),
"""mask_labels""": torch.randn((2, 10, *size) , device=__lowerCAmelCase ),
"""class_labels""": torch.zeros(2 , 10 , device=__lowerCAmelCase ).long(),
}
lowercase = self.model_tester.get_config()
lowercase = MaskaFormerForUniversalSegmentation(__lowerCAmelCase ).to(__lowerCAmelCase )
lowercase = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__lowerCAmelCase , **__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
lowercase = model(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def A__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase = self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs()
lowercase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
lowercase = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase ).loss
loss.backward()
def A__ ( self ):
"""simple docstring"""
lowercase = self.all_model_classes[1]
lowercase , lowercase , lowercase , lowercase , lowercase = self.model_tester.prepare_config_and_inputs()
lowercase = True
lowercase = True
lowercase = model_class(__lowerCAmelCase ).to(__lowerCAmelCase )
model.train()
lowercase = model(__lowerCAmelCase , mask_labels=__lowerCAmelCase , class_labels=__lowerCAmelCase )
lowercase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowercase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowercase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowercase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
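# (the retain_grad/backward sequence above verifies that gradients actually reach
# the encoder, pixel-decoder, transformer-decoder and attention activations)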
__lowerCAmelCase : Any =1E-4
def UpperCAmelCase__ ( ) -> Any:
'''simple docstring'''
lowercase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _A ( unittest.TestCase ):
@cached_property
def A__ ( self ):
"""simple docstring"""
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def A__ ( self ):
"""simple docstring"""
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def A__ ( self ):
"""simple docstring"""
lowercase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__lowerCAmelCase )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
lowercase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
lowercase = model(**__lowerCAmelCase )
lowercase = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
lowercase = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
lowercase = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(__lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def A__ ( self ):
"""simple docstring"""
lowercase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowerCAmelCase ).eval()
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(__lowerCAmelCase , return_tensors="""pt""" ).to(__lowerCAmelCase )
lowercase = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__lowerCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
lowercase = model(**__lowerCAmelCase )
# masks_queries_logits
lowercase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowercase = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
lowercase = torch.tensor(__lowerCAmelCase ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
# class_queries_logits
lowercase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowercase = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowerCAmelCase , atol=__lowerCAmelCase ) )
def A__ ( self ):
"""simple docstring"""
lowercase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowerCAmelCase ).eval()
lowercase = self.default_image_processor
lowercase = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="""pt""" , )
lowercase = inputs["""pixel_values"""].to(__lowerCAmelCase )
lowercase = [el.to(__lowerCAmelCase ) for el in inputs["""mask_labels"""]]
lowercase = [el.to(__lowerCAmelCase ) for el in inputs["""class_labels"""]]
with torch.no_grad():
lowercase = model(**__lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 32
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = {}
if top_k is not None:
lowercase = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = load_image(__lowerCAmelCase )
lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.model(**__lowerCAmelCase )
return model_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase = probs.topk(__lowerCAmelCase )
elif self.framework == "tf":
lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase )
lowercase , lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
| 32
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = data
def __iter__( self ):
"""simple docstring"""
for element in self.data:
yield element
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any]=True ) -> List[Any]:
'''simple docstring'''
lowercase = Accelerator(even_batches=lowerCAmelCase__ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def UpperCAmelCase__ ( lowerCAmelCase__ :Accelerator , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :bool = False ) -> Dict:
'''simple docstring'''
if iterable:
lowercase = DummyIterableDataset(torch.as_tensor(range(lowerCAmelCase__ ) ) )
else:
lowercase = TensorDataset(torch.as_tensor(range(lowerCAmelCase__ ) ) )
lowercase = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowercase = accelerator.prepare(lowerCAmelCase__ )
return dl
def UpperCAmelCase__ ( lowerCAmelCase__ :Accelerator , lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :List[int] , ) -> Dict:
'''simple docstring'''
lowercase = create_dataloader(accelerator=lowerCAmelCase__ , dataset_size=lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
lowercase = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def UpperCAmelCase__ ( ) -> Any:
'''simple docstring'''
lowercase = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
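# (concretely: 7 samples sharded over 2 processes is 4 + 3; padding duplicates
# one sample so both processes see batches of size [2, 2])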
def UpperCAmelCase__ ( ) -> Optional[Any]:
'''simple docstring'''
lowercase = create_accelerator(even_batches=lowerCAmelCase__ )
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
lowerCAmelCase__ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
lowercase = create_accelerator(even_batches=lowerCAmelCase__ )
lowercase = torch.nn.Linear(1 , 1 )
lowercase = accelerator.prepare(lowerCAmelCase__ )
lowercase = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
lowercase = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(lowerCAmelCase__ ):
lowercase = ddp_model(batch[0].float() )
lowercase = output.sum()
loss.backward()
batch_idxs.append(lowerCAmelCase__ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
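# (with even_batches disabled, process 0 sees batch indices [0, 1] while process 1
# only sees [0]; join_uneven_inputs lets the backward passes complete without the
# processes deadlocking on the missing batch)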
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Tuple:
'''simple docstring'''
with warnings.catch_warnings(record=lowerCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , lowerCAmelCase__ )
assert "only supported for multi-GPU" in str(w[-1].message )
def UpperCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
lowercase = True
lowercase = False
lowercase = create_accelerator(even_batches=lowerCAmelCase__ )
lowercase = torch.nn.Linear(1 , 1 )
lowercase = accelerator.prepare(lowerCAmelCase__ )
lowercase = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
lowercase = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
lowercase = train_dl.batch_sampler.even_batches
lowercase = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
lowercase = True
lowercase = False
lowercase = create_accelerator(even_batches=lowerCAmelCase__ )
lowercase = torch.nn.Linear(1 , 1 )
lowercase = accelerator.prepare(lowerCAmelCase__ )
create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=lowerCAmelCase__ )
lowercase = create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
lowercase = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def UpperCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
lowercase = create_accelerator()
lowercase = torch.nn.Linear(1 , 1 )
lowercase = accelerator.prepare(lowerCAmelCase__ )
create_dataloader(lowerCAmelCase__ , dataset_size=3 , batch_size=1 , iterable=lowerCAmelCase__ )
with warnings.catch_warnings(record=lowerCAmelCase__ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=lowerCAmelCase__ ):
pass
assert issubclass(w[-1].category , lowerCAmelCase__ )
assert "only supported for map-style datasets" in str(w[-1].message )
def UpperCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
lowercase = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
lowercase = accelerator.state.distributed_type
lowercase = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(lowerCAmelCase__ )
lowercase = original_state
if __name__ == "__main__":
main()
| 32
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 32
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
__lowerCAmelCase : Any ={
"""microsoft/table-transformer-detection""": (
"""https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"""
),
}
class _A ( lowerCAmelCase ):
snake_case__ : Any = 'table-transformer'
snake_case__ : int = ['past_key_values']
snake_case__ : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __lowerCAmelCase=True , __lowerCAmelCase=None , __lowerCAmelCase=3 , __lowerCAmelCase=100 , __lowerCAmelCase=6 , __lowerCAmelCase=2048 , __lowerCAmelCase=8 , __lowerCAmelCase=6 , __lowerCAmelCase=2048 , __lowerCAmelCase=8 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=True , __lowerCAmelCase="relu" , __lowerCAmelCase=256 , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1.0 , __lowerCAmelCase=False , __lowerCAmelCase="sine" , __lowerCAmelCase="resnet50" , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=1 , __lowerCAmelCase=1 , __lowerCAmelCase=5 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowercase = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.get("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
# set timm attributes to None
lowercase , lowercase , lowercase = None, None, None
lowercase = use_timm_backbone
lowercase = backbone_config
lowercase = num_channels
lowercase = num_queries
lowercase = d_model
lowercase = encoder_ffn_dim
lowercase = encoder_layers
lowercase = encoder_attention_heads
lowercase = decoder_ffn_dim
lowercase = decoder_layers
lowercase = decoder_attention_heads
lowercase = dropout
lowercase = attention_dropout
lowercase = activation_dropout
lowercase = activation_function
lowercase = init_std
lowercase = init_xavier_std
lowercase = encoder_layerdrop
lowercase = decoder_layerdrop
lowercase = encoder_layers
lowercase = auxiliary_loss
lowercase = position_embedding_type
lowercase = backbone
lowercase = use_pretrained_backbone
lowercase = dilation
# Hungarian matcher
lowercase = class_cost
lowercase = bbox_cost
lowercase = giou_cost
# Loss coefficients
lowercase = mask_loss_coefficient
lowercase = dice_loss_coefficient
lowercase = bbox_loss_coefficient
lowercase = giou_loss_coefficient
lowercase = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCAmelCase , **__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def A__ ( self ):
"""simple docstring"""
return self.d_model
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = version.parse('1.11' )
@property
def A__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def A__ ( self ):
"""simple docstring"""
return 1E-5
@property
def A__ ( self ):
"""simple docstring"""
return 12
| 32
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = GPTSanJapaneseTokenizer
snake_case__ : int = False
snake_case__ : Tuple = {'do_clean_text': False, 'add_prefix_space': False}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowercase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowercase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.get_input_output_texts(__lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
| 32
| 1
|
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
# Don't modify the user's data in case they want to reuse it (e.g. in tests), because once
# it has been modified it would not be accepted here again, since the `auto` values would have been overridden
lowercase = deepcopy(__lowerCAmelCase )
elif os.path.exists(__lowerCAmelCase ):
with io.open(__lowerCAmelCase , """r""" , encoding="""utf-8""" ) as f:
lowercase = json.load(__lowerCAmelCase )
else:
try:
lowercase = baseaa.urlsafe_baadecode(__lowerCAmelCase ).decode("""utf-8""" )
lowercase = json.loads(__lowerCAmelCase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}' )
lowercase = config
self.set_stage_and_offload()
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_value("""zero_optimization.stage""" , -1 )
# offload
lowercase = False
if self.is_zeroa() or self.is_zeroa():
lowercase = set(["""cpu""", """nvme"""] )
lowercase = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
lowercase = True
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.config
# find the config node of interest if it exists
lowercase = ds_key_long.split(""".""" )
lowercase = nodes.pop()
for node in nodes:
lowercase = config.get(__lowerCAmelCase )
if config is None:
return None, ds_key
return config, ds_key
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase , lowercase = self.find_config_node(__lowerCAmelCase )
if config is None:
return default
return config.get(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=False ):
"""simple docstring"""
lowercase = self.config
# find the config node of interest if it exists
lowercase = ds_key_long.split(""".""" )
for node in nodes:
lowercase = config
lowercase = config.get(__lowerCAmelCase )
if config is None:
if must_exist:
raise ValueError(f'Can\'t find {ds_key_long} entry in the config: {self.config}' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.get_value(__lowerCAmelCase )
return False if value is None else bool(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.get_value(__lowerCAmelCase )
return False if value is None else not bool(__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
return self._stage == 2
def A__ ( self ):
"""simple docstring"""
return self._stage == 3
def A__ ( self ):
"""simple docstring"""
return self._offload
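# A minimal sketch (illustrative only, not part of the original file) of how the
# dotted-key navigation above resolves a nested DeepSpeed config entry such as
# "zero_optimization.offload_param.device" against a plain dict.
def _example_dotted_key_lookup():
    config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
    nodes = "zero_optimization.offload_param.device".split(".")
    key = nodes.pop()
    for node in nodes:
        config = config.get(node)
        if config is None:
            return None
    return config.get(key)  # -> "cpu"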
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = engine
def A__ ( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
self.engine.backward(__lowerCAmelCase , **__lowerCAmelCase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# This plugin overrides the above calls with no-ops when Accelerate runs under
# DeepSpeed, but allows normal functionality for non-DeepSpeed cases, thus enabling
# a simple training loop that works transparently under many training regimes.
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , device_placement=__lowerCAmelCase , scaler=__lowerCAmelCase )
lowercase = hasattr(self.optimizer , """overflow""" )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def A__ ( self ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def A__ ( self ):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=0.0_0_1 , __lowerCAmelCase=0 , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = params
lowercase = lr
lowercase = weight_decay
lowercase = kwargs
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=0 , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = optimizer
lowercase = total_num_steps
lowercase = warmup_num_steps
lowercase = kwargs
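# A hedged sketch (the two classes above appear to correspond to accelerate's
# DummyOptim and DummyScheduler; the name below is illustrative): the placeholders
# only record hyperparameters so Accelerate can hand them to DeepSpeed, which then
# builds the real optimizer and scheduler from its own JSON config.
def _example_placeholder_optim():
    class PlaceholderOptim:
        def __init__(self, params, lr=1e-3, weight_decay=0, **kwargs):
            self.params = params
            self.lr = lr
            self.weight_decay = weight_decay
            self.kwargs = kwargs

    opt = PlaceholderOptim(params=[], lr=5e-5, weight_decay=0.01)
    return opt.lr, opt.weight_decay  # -> (5e-05, 0.01)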
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] ={"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ViTFeatureExtractor"""]
__lowerCAmelCase : List[str] =["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
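# A minimal sketch (assumption: illustrative only, not part of the original module)
# of the lazy-import technique used by _LazyModule above: attribute access triggers
# the real import, so heavy torch/tf/flax submodules load only on first use.
import importlib
import types


class _ExampleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the owning submodule on demand and pull the symbol from it
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)


# usage: _ExampleLazyModule("demo", {"math": ["sqrt"]}).sqrt(4.0) returns 2.0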
| 32
| 1
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = GPTSanJapaneseTokenizer
snake_case__ : int = False
snake_case__ : Tuple = {'do_clean_text': False, 'add_prefix_space': False}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowercase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowercase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.get_input_output_texts(__lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
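    # Worked illustration (comment only, not part of the original test): if
    # len_prefix were 2 and len_text were 3, the three expected masks above are
    #   [1] + [0] * 6            -> [1, 0, 0, 0, 0, 0, 0]   (no prefix given)
    #   [1] * 6 + [0]            -> [1, 1, 1, 1, 1, 1, 0]   (everything passed as prefix)
    #   [1] + [1] * 2 + [0] * 4  -> [1, 1, 1, 0, 0, 0, 0]   (explicit prefix/text split)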
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
| 32
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
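# A hedged sketch (illustrative only, not part of the original reader): mirrors the
# branch above -- with the cache disabled, the builder is forced to re-download and
# re-prepare the dataset instead of reusing a previous run.
def _example_cache_mode_choice(load_from_cache_file=False):
    from datasets.download import DownloadMode

    return None if load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD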
| 32
| 1
|
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__lowerCAmelCase : Dict =logging.get_logger(__name__)
enable_full_determinism()
class _A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
snake_case__ : Any = UNetaDModel
snake_case__ : Any = 'sample'
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 4
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
lowercase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def A__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def A__ ( self ):
"""simple docstring"""
lowercase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
lowercase = self.dummy_input
return init_dict, inputs_dict
class _A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = UNetaDModel
snake_case__ : List[Any] = 'sample'
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 4
lowercase = 4
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
lowercase = torch.tensor([10] ).to(__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self ):
"""simple docstring"""
return (4, 32, 32)
@property
def A__ ( self ):
"""simple docstring"""
return (4, 32, 32)
def A__ ( self ):
"""simple docstring"""
lowercase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
lowercase = self.dummy_input
return init_dict, inputs_dict
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
lowercase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model.to(__lowerCAmelCase )
lowercase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase )
model_accelerate.to(__lowerCAmelCase )
model_accelerate.eval()
lowercase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase = noise.to(__lowerCAmelCase )
lowercase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
lowercase = model_accelerate(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
# the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
lowercase , lowercase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=__lowerCAmelCase , low_cpu_mem_usage=__lowerCAmelCase )
model_normal_load.to(__lowerCAmelCase )
model_normal_load.eval()
lowercase = model_normal_load(__lowerCAmelCase , __lowerCAmelCase )["""sample"""]
assert torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1E-3 )
def A__ ( self ):
"""simple docstring"""
lowercase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__lowerCAmelCase )
lowercase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase = noise.to(__lowerCAmelCase )
lowercase = torch.tensor([10] * noise.shape[0] ).to(__lowerCAmelCase )
with torch.no_grad():
lowercase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
lowercase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1E-3 ) )
class _A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = UNetaDModel
snake_case__ : Union[str, Any] = 'sample'
@property
def A__ ( self , __lowerCAmelCase=(32, 32) ):
"""simple docstring"""
lowercase = 4
lowercase = 3
lowercase = floats_tensor((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
lowercase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__lowerCAmelCase )
return {"sample": noise, "timestep": time_step}
@property
def A__ ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def A__ ( self ):
"""simple docstring"""
return (3, 32, 32)
def A__ ( self ):
"""simple docstring"""
lowercase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
lowercase = self.dummy_input
return init_dict, inputs_dict
@slow
def A__ ( self ):
"""simple docstring"""
lowercase , lowercase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowerCAmelCase )
lowercase = self.dummy_input
lowercase = floats_tensor((4, 3) + (256, 256) ).to(__lowerCAmelCase )
lowercase = noise
lowercase = model(**__lowerCAmelCase )
assert image is not None, "Make sure output is not None"
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__lowerCAmelCase )
lowercase = 4
lowercase = 3
lowercase = (256, 256)
lowercase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
lowercase = torch.tensor(batch_size * [1E-4] ).to(__lowerCAmelCase )
with torch.no_grad():
lowercase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
lowercase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1E-2 ) )
def A__ ( self ):
"""simple docstring"""
lowercase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__lowerCAmelCase )
lowercase = 4
lowercase = 3
lowercase = (32, 32)
lowercase = torch.ones((batch_size, num_channels) + sizes ).to(__lowerCAmelCase )
lowercase = torch.tensor(batch_size * [1E-4] ).to(__lowerCAmelCase )
with torch.no_grad():
lowercase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
lowercase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
lowercase = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(__lowerCAmelCase , __lowerCAmelCase , rtol=1E-2 ) )
def A__ ( self ):
"""simple docstring"""
pass
| 32
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(lowerCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCAmelCase__ )
# ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
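# A hedged sketch reconstructed from the mock assertions above -- NOT the actual
# implementation of file_transfer.send_file (the port and chunk size are
# illustrative assumptions): bind, listen, accept one client, stream the file
# chunk by chunk, then shut the sockets down.
def _example_send_file_sketch(filename="mytext.txt", port=12312):
    import socket

    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", port))
    sock.listen(1)
    conn, _addr = sock.accept()
    conn.recv(1024)  # client hello, mirrored by conn.recv.assert_called_once()
    with open(filename, "rb") as f:
        data = f.read(1024)
        while data:
            conn.send(data)
            data = f.read(1024)
    conn.close()
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()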
| 32
| 1
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) <= 1:
return lst
lowercase = 1
while i < len(lowerCAmelCase__ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lowercase , lowercase = lst[i], lst[i - 1]
i -= 1
if i == 0:
lowercase = 1
return lst
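# Worked trace (comment only): gnome sort on [3, 1, 2]
#   i=1: 3 > 1 -> swap -> [1, 3, 2], step back to i=0, reset to i=1
#   i=1: 1 <= 3 -> i=2
#   i=2: 3 > 2 -> swap -> [1, 2, 3], step back to i=1
#   i=1: 1 <= 2 -> i=2;  i=2: 2 <= 3 -> i=3 == len -> done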
if __name__ == "__main__":
__lowerCAmelCase : str =input("""Enter numbers separated by a comma:\n""").strip()
__lowerCAmelCase : Optional[Any] =[int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 32
|
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
lowercase = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase = in_proj_weight[
-encoder_config.hidden_size :, :
]
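# Worked illustration (comment only): for hidden size H the fused qkv weight has
# shape (3H, H); rows [0:H] become the query projection, rows [H:2H] the key
# projection, and rows [-H:] (i.e. [2H:3H]) the value projection, matching the
# three slices above.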
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
lowercase = dct.pop(lowerCAmelCase__ )
lowercase = val
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
if "handwritten" in checkpoint_url:
lowercase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase__ )
lowercase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase = 1_0_2_4
lowercase = 4_0_9_6
lowercase = 2_4
lowercase = 1_6
lowercase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = False
lowercase = """relu"""
lowercase = 1_0_2_4
lowercase = True
lowercase = False
lowercase = False
# load HuggingFace model
lowercase = ViTModel(lowerCAmelCase__ , add_pooling_layer=lowerCAmelCase__ )
lowercase = TrOCRForCausalLM(lowerCAmelCase__ )
lowercase = VisionEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" , check_hash=lowerCAmelCase__ )["""model"""]
lowercase = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase = state_dict.pop(lowerCAmelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowercase = val
else:
lowercase = val
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image
lowercase = ViTImageProcessor(size=encoder_config.image_size )
lowercase = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowercase = TrOCRProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = processor(images=prepare_img(lowerCAmelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowercase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase = model(pixel_values=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
lowercase = outputs.logits
lowercase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
lowercase = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
lowercase = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase__ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__lowerCAmelCase : Dict =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All prime numbers greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
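# Worked example (comment only): is_prime(97) -- 97 is not < 4, not divisible by
# 2 or 3, and since int(sqrt(97) + 1) == 10 the loop only tests i = 5 (so the
# divisors tried are 5 and 7); neither divides 97, hence True.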
__lowerCAmelCase : List[str] =[num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> list[int]:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
lowercase = []
for num in range(len(lowerCAmelCase__ ) ):
lowercase = 0
while 2 * i * i <= odd_composites[num]:
lowercase = odd_composites[num] - 2 * i * i
if is_prime(lowerCAmelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(lowerCAmelCase__ ) == n:
return list_nums
return []
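# Worked illustration (comment only): 9 = 7 + 2*1^2 and 15 = 13 + 2*1^2, so the
# inner while loop breaks early for those odd composites; 5777 is the first odd
# composite with no prime + twice-a-square decomposition, so it survives into
# list_nums (Project Euler 46, Goldbach's other conjecture).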
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
lowercase = n ** (1 / 3)
return (val * val * val) == n
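# Note (comment only): the float cube root above is only reliable when rounding
# happens to land exactly on an integer; a more robust check is integer-based,
# e.g. round(n ** (1 / 3)) ** 3 == n for positive n.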
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 32
| 1
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass(frozen=lowerCAmelCase )
class _A :
snake_case__ : str
snake_case__ : str
snake_case__ : Optional[str] = None
snake_case__ : Optional[str] = None
snake_case__ : Optional[str] = None
@dataclass(frozen=lowerCAmelCase )
class _A :
snake_case__ : List[int]
snake_case__ : Optional[List[int]] = None
snake_case__ : Optional[List[int]] = None
snake_case__ : Optional[Union[int, float]] = None
snake_case__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( lowerCAmelCase ):
snake_case__ : List[InputFeatures]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase=False , __lowerCAmelCase = False , ):
"""simple docstring"""
lowercase = hans_processors[task]()
lowercase = os.path.join(
__lowerCAmelCase , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(__lowerCAmelCase ) , __lowerCAmelCase , ) , )
lowercase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase , lowercase = label_list[2], label_list[1]
lowercase = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase = cached_features_file + """.lock"""
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not overwrite_cache:
logger.info(f'Loading features from cached file {cached_features_file}' )
lowercase = torch.load(__lowerCAmelCase )
else:
logger.info(f'Creating features from dataset file at {data_dir}' )
lowercase = (
processor.get_dev_examples(__lowerCAmelCase ) if evaluate else processor.get_train_examples(__lowerCAmelCase )
)
logger.info("""Training examples: %s""" , len(__lowerCAmelCase ) )
lowercase = hans_convert_examples_to_features(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase )
torch.save(self.features , __lowerCAmelCase )
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return self.features[i]
def A__ ( self ):
"""simple docstring"""
return self.label_list
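# A hedged sketch (illustrative only, not part of the original file) of the
# lock-then-load-or-build pattern used above, so that only one distributed worker
# materialises the feature cache while the others block and then read it.
def _example_filelock_cache(path="cached_features.bin"):
    import os

    from filelock import FileLock

    with FileLock(path + ".lock"):
        if os.path.exists(path):
            return "loaded from cache"
        return "built and saved"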
if is_tf_available():
import tensorflow as tf
class _A :
snake_case__ : List[InputFeatures]
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 128 , __lowerCAmelCase=False , __lowerCAmelCase = False , ):
"""simple docstring"""
lowercase = hans_processors[task]()
lowercase = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase , lowercase = label_list[2], label_list[1]
lowercase = label_list
lowercase = processor.get_dev_examples(__lowerCAmelCase ) if evaluate else processor.get_train_examples(__lowerCAmelCase )
lowercase = hans_convert_examples_to_features(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(__lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
lowercase = tf.data.Dataset.from_generator(
__lowerCAmelCase , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A__ ( self ):
"""simple docstring"""
return self.dataset
def __len__( self ):
"""simple docstring"""
return len(self.features )
def __getitem__( self , __lowerCAmelCase ):
"""simple docstring"""
return self.features[i]
def A__ ( self ):
"""simple docstring"""
return self.label_list
class _A ( lowerCAmelCase ):
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(__lowerCAmelCase , """heuristics_train_set.txt""" ) ) , """train""" )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self._create_examples(self._read_tsv(os.path.join(__lowerCAmelCase , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def A__ ( self ):
"""simple docstring"""
return ["contradiction", "entailment", "neutral"]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = []
for i, line in enumerate(__lowerCAmelCase ):
if i == 0:
continue
lowercase = """%s-%s""" % (set_type, line[0])
lowercase = line[5]
lowercase = line[6]
lowercase = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
lowercase = line[0]
examples.append(InputExample(guid=__lowerCAmelCase , text_a=__lowerCAmelCase , text_b=__lowerCAmelCase , label=__lowerCAmelCase , pairID=__lowerCAmelCase ) )
return examples
def UpperCAmelCase__ ( lowerCAmelCase__ :List[InputExample] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int , lowerCAmelCase__ :PreTrainedTokenizer , ) -> Optional[Any]:
'''simple docstring'''
lowercase = {label: i for i, label in enumerate(lowerCAmelCase__ )}
lowercase = []
for ex_index, example in tqdm.tqdm(enumerate(lowerCAmelCase__ ) , desc="""convert examples to features""" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d""" % (ex_index) )
lowercase = tokenizer(
example.text_a , example.text_b , add_special_tokens=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="""max_length""" , truncation=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , )
lowercase = label_map[example.label] if example.label in label_map else 0
lowercase = int(example.pairID )
features.append(InputFeatures(**lowerCAmelCase__ , label=lowerCAmelCase__ , pairID=lowerCAmelCase__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
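# Worked illustration (comment only): with the default HANS label order
# ["contradiction", "entailment", "neutral"], label_map is
# {"contradiction": 0, "entailment": 1, "neutral": 2}; an example whose label is
# missing from the map falls back to index 0, per the conditional above.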
__lowerCAmelCase : Tuple ={
"""hans""": 3,
}
__lowerCAmelCase : str ={
"""hans""": HansProcessor,
}
| 32
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : str = KandinskyInpaintPipeline
snake_case__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ : Optional[int] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ : Dict = False
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A__ ( self ):
"""simple docstring"""
return 100
@property
def A__ ( self ):
"""simple docstring"""
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(__lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def A__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
lowercase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 32
| 1
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 1_0_0_0 ) -> int:
'''simple docstring'''
lowercase = 2**power
lowercase = str(lowerCAmelCase__ )
lowercase = list(lowerCAmelCase__ )
lowercase = 0
for i in list_num:
sum_of_num += int(lowerCAmelCase__ )
return sum_of_num
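# Worked example (comment only): power = 15 -> 2**15 = 32768 and
# 3 + 2 + 7 + 6 + 8 = 26 (the small case quoted by Project Euler problem 16).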
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =int(input("""Enter the power of 2: """).strip())
print("""2 ^ """, power, """ = """, 2**power)
__lowerCAmelCase : Dict =solution(power)
print("""Sum of the digits is: """, result)
| 32
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 32
| 1
|
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
__lowerCAmelCase : int ={
"""google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""",
"""google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""",
"""google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""",
}
class _A ( lowerCAmelCase ):
snake_case__ : List[str] = 'owlvit_text_model'
def __init__( self , __lowerCAmelCase=4_9408 , __lowerCAmelCase=512 , __lowerCAmelCase=2048 , __lowerCAmelCase=12 , __lowerCAmelCase=8 , __lowerCAmelCase=16 , __lowerCAmelCase="quick_gelu" , __lowerCAmelCase=1E-5 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1.0 , __lowerCAmelCase=0 , __lowerCAmelCase=4_9406 , __lowerCAmelCase=4_9407 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = intermediate_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = max_position_embeddings
lowercase = hidden_act
lowercase = layer_norm_eps
lowercase = attention_dropout
lowercase = initializer_range
lowercase = initializer_factor
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__lowerCAmelCase )
lowercase , lowercase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
lowercase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class _A ( lowerCAmelCase ):
snake_case__ : Union[str, Any] = 'owlvit_vision_model'
def __init__( self , __lowerCAmelCase=768 , __lowerCAmelCase=3072 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3 , __lowerCAmelCase=768 , __lowerCAmelCase=32 , __lowerCAmelCase="quick_gelu" , __lowerCAmelCase=1E-5 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1.0 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = hidden_size
lowercase = intermediate_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = num_channels
lowercase = image_size
lowercase = patch_size
lowercase = hidden_act
lowercase = layer_norm_eps
lowercase = attention_dropout
lowercase = initializer_range
lowercase = initializer_factor
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__lowerCAmelCase )
lowercase , lowercase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
lowercase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
class _A ( lowerCAmelCase ):
snake_case__ : Tuple = 'owlvit'
snake_case__ : List[Any] = True
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=512 , __lowerCAmelCase=2.6_5_9_2 , __lowerCAmelCase=True , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
if text_config is None:
lowercase = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
lowercase = {}
            logger.info("""vision_config is None. Initializing the OwlViTVisionConfig with default values.""" )
lowercase = OwlViTTextConfig(**__lowerCAmelCase )
lowercase = OwlViTVisionConfig(**__lowerCAmelCase )
lowercase = projection_dim
lowercase = logit_scale_init_value
lowercase = return_dict
lowercase = 1.0
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
cls._set_token_in_kwargs(__lowerCAmelCase )
lowercase , lowercase = cls.get_config_dict(__lowerCAmelCase , **__lowerCAmelCase )
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = {}
lowercase = text_config
lowercase = vision_config
return cls.from_dict(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.text_config.to_dict()
lowercase = self.vision_config.to_dict()
lowercase = self.__class__.model_type
return output
class _A ( lowerCAmelCase ):
@property
def A__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def A__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def A__ ( self ):
"""simple docstring"""
return 1E-4
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = -1 , __lowerCAmelCase = -1 , __lowerCAmelCase = None , ):
"""simple docstring"""
lowercase = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__lowerCAmelCase , seq_length=__lowerCAmelCase , framework=__lowerCAmelCase )
lowercase = super().generate_dummy_inputs(
processor.image_processor , batch_size=__lowerCAmelCase , framework=__lowerCAmelCase )
return {**text_input_dict, **image_input_dict}
@property
def A__ ( self ):
"""simple docstring"""
return 14
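# A minimal usage sketch (an addition, assuming these classes mirror
# transformers' OwlViTTextConfig / OwlViTVisionConfig / OwlViTConfig, which the
# code above references by name):
#
#     from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig
#     text_config = OwlViTTextConfig(hidden_size=512)
#     vision_config = OwlViTVisionConfig(hidden_size=768, patch_size=32)
#     config = OwlViTConfig.from_text_vision_configs(text_config, vision_config)
#     assert config.projection_dim == 512  # the default projection size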
| 32
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowercase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowercase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowercase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowercase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowercase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowercase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowercase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowercase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowercase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowercase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowercase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowercase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowercase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowercase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowercase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowercase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowercase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowercase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowercase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowercase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowercase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowercase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowercase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
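# Worked example of the renaming rules above (illustrative):
#   "img_encoder.layers.0.blocks.0.norm1.weight"
#   -> "vision_model.encoder.stages.0.layers.0.layer_norm1.weight"
# ("img_encoder.layers" -> "vision_model.encoder.stages", "blocks" -> "layers",
#  "norm1" -> "layer_norm1"); rules that do not match leave the name unchanged.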
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase , lowercase = int(key_split[2] ), int(key_split[4] )
lowercase = config.vision_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.text_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[
dim : dim * 2, :
]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowercase = val.squeeze_()
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int="groupvit-gcc-yfcc" , lowerCAmelCase__ :List[Any]=False ) -> str:
'''simple docstring'''
lowercase = GroupViTConfig()
lowercase = GroupViTModel(lowerCAmelCase__ ).eval()
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowercase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowercase = prepare_img()
lowercase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowercase = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print("""Successfully saved processor and model to""" , lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
        default="""groupvit-gcc-yfcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__lowerCAmelCase : int =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
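    # Example invocation of this conversion script (all paths are placeholders):
    #   python convert_groupvit_checkpoint.py \
    #       --checkpoint_path /path/to/groupvit_checkpoint.pth \
    #       --pytorch_dump_folder_path ./groupvit-gcc-yfcc \
    #       --model_name groupvit-gcc-yfcc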
| 32
| 1
|
"""simple docstring"""
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__lowerCAmelCase : Tuple =datasets.logging.get_logger(__name__)
__lowerCAmelCase : str ="""\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
__lowerCAmelCase : str ="""\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line, with all the annotation for this word in columns separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterisk with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities This column identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing of CoNLL files was developed by Leo Born.
"""
__lowerCAmelCase : Dict ="""
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word references to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :Any="dummy_doc" ) -> int:
'''simple docstring'''
lowercase = {doc: key_lines}
lowercase = {doc: sys_lines}
lowercase = {}
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 0
lowercase = 0
lowercase , lowercase = reader.get_doc_mentions(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ )
key_singletons_num += singletons_num
if NP_only or min_span:
lowercase = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = reader.get_doc_mentions(lowerCAmelCase__ , sys_doc_lines[doc] , lowerCAmelCase__ )
sys_singletons_num += singletons_num
if NP_only or min_span:
lowercase = reader.set_annotated_parse_trees(lowerCAmelCase__ , key_doc_lines[doc] , lowerCAmelCase__ , lowerCAmelCase__ )
if remove_nested:
lowercase , lowercase = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
key_nested_coref_num += nested_mentions
key_removed_nested_clusters += removed_clusters
lowercase , lowercase = reader.remove_nested_coref_mentions(lowerCAmelCase__ , lowerCAmelCase__ )
sys_nested_coref_num += nested_mentions
sys_removed_nested_clusters += removed_clusters
lowercase = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = reader.get_mention_assignments(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' )
logger.info(
"""Number of resulting singleton clusters in the key """
f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' )
if not keep_singletons:
logger.info(
f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
"""files, respectively""" )
return doc_coref_infos
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict ) -> int:
'''simple docstring'''
lowercase = get_coref_infos(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = {}
lowercase = 0
lowercase = 0
for name, metric in metrics:
lowercase , lowercase , lowercase = evaluator.evaluate_documents(lowerCAmelCase__ , lowerCAmelCase__ , beta=1 )
if name in ["muc", "bcub", "ceafe"]:
conll += fa
conll_subparts_num += 1
output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} )
logger.info(
name.ljust(1_0 ) , f'Recall: {recall * 1_0_0:.2f}' , f' Precision: {precision * 1_0_0:.2f}' , f' F1: {fa * 1_0_0:.2f}' , )
if conll_subparts_num == 3:
lowercase = (conll / 3) * 1_0_0
logger.info(f'CoNLL score: {conll:.2f}' )
output_scores.update({"""conll_score""": conll} )
return output_scores
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple ) -> str:
'''simple docstring'''
lowercase = False
for line in key_lines:
if not line.startswith("""#""" ):
if len(line.split() ) > 6:
lowercase = line.split()[5]
if not parse_col == "-":
lowercase = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def A__ ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False ):
"""simple docstring"""
lowercase = [
("""mentions""", evaluator.mentions),
("""muc""", evaluator.muc),
("""bcub""", evaluator.b_cubed),
("""ceafe""", evaluator.ceafe),
("""lea""", evaluator.lea),
]
if min_span:
lowercase = util.check_gold_parse_annotation(__lowerCAmelCase )
if not has_gold_parse:
raise NotImplementedError("""References should have gold parse annotation to use 'min_span'.""" )
# util.parse_key_file(key_file)
# key_file = key_file + ".parsed"
lowercase = evaluate(
key_lines=__lowerCAmelCase , sys_lines=__lowerCAmelCase , metrics=__lowerCAmelCase , NP_only=__lowerCAmelCase , remove_nested=__lowerCAmelCase , keep_singletons=__lowerCAmelCase , min_span=__lowerCAmelCase , )
return score
| 32
|
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        """simple docstring"""
        self.source_index = None
        self.sink_index = None
        self.graph = graph
        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None
    def _normalize_graph(self, sources, sinks):
        """simple docstring"""
        if isinstance(sources, int):
            sources = [sources]
        if isinstance(sinks, int):
            sinks = [sinks]
        if len(sources) == 0 or len(sinks) == 0:
            return
        self.source_index = sources[0]
        self.sink_index = sinks[0]
# make fake vertex if there are more
# than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])
            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0
            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        """simple docstring"""
        if self.maximum_flow_algorithm is None:
            raise Exception("""You need to set maximum flow algorithm before.""" )
        if self.source_index is None or self.sink_index is None:
            return 0
        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()
    def set_maximum_flow_algorithm(self, algorithm):
        """simple docstring"""
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        """simple docstring"""
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False
    def execute(self):
        """simple docstring"""
        if not self.executed:
            self._algorithm()
            self.executed = True
    def _algorithm(self):
        """simple docstring"""
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        """simple docstring"""
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1
    def getMaximumFlow(self):
        """simple docstring"""
        if not self.executed:
            raise Exception("""You should execute algorithm before using its result!""" )
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        """simple docstring"""
        super().__init__(flow_network)
        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count )]
        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        """simple docstring"""
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count )
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list ):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index )
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0 , vertices_list.pop(i ) )
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index] )
    def process_vertex(self, vertex_index):
        """simple docstring"""
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count ):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index , neighbour_index )
            self.relabel(vertex_index )
    def push(self, from_index, to_index):
        """simple docstring"""
        preflow_delta = min(
            self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        """simple docstring"""
        min_height = None
        for to_index in range(self.verticies_count ):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
    maximum_flow = flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
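    # For the demo matrix above (single source 0, single sink 3, path
    # 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8), the bottleneck edge 1 -> 2
    # limits the answer, so this should print "maximum flow is 6".
    # A tiny independent cross-check via Edmonds-Karp (an illustrative sketch,
    # not part of the original module):
    from collections import deque
    def _edmonds_karp(capacity, s, t):
        n = len(capacity)
        flow = [[0] * n for _ in range(n)]
        total = 0
        while True:
            parent = [-1] * n
            parent[s] = s
            queue = deque([s])
            while queue and parent[t] == -1:
                u = queue.popleft()
                for v in range(n):
                    if parent[v] == -1 and capacity[u][v] - flow[u][v] > 0:
                        parent[v] = u
                        queue.append(v)
            if parent[t] == -1:
                return total
            v, bottleneck = t, float("inf")
            while v != s:
                u = parent[v]
                bottleneck = min(bottleneck, capacity[u][v] - flow[u][v])
                v = u
            v = t
            while v != s:
                u = parent[v]
                flow[u][v] += bottleneck
                flow[v][u] -= bottleneck
                v = u
            total += bottleneck
    assert _edmonds_karp([[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]], 0, 3) == 6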
| 32
| 1
|
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b) ) )
def _validate_point(point: list[float]) -> None:
    '''simple docstring'''
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float) ):
                    msg = (
                        """Expected a list of numbers as input, found """
                        f'{type(item ).__name__}'
                    )
                    raise TypeError(msg)
        else:
            msg = f'Expected a list of numbers as input, found {type(point ).__name__}'
            raise TypeError(msg)
    else:
        raise ValueError("""Missing an input""" )
def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    '''simple docstring'''
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("""Both points must be in the same n-dimensional space""" )
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
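    # Minimal sanity checks (an illustrative addition): |1 - 2| + |1 - 2| = 2.
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1.5, 2], [3, 2]) == 1.5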
| 32
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
__lowerCAmelCase : Dict =tf.data.AUTOTUNE
def UpperCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
lowercase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCAmelCase__ , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCAmelCase__ , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
lowercase = parser.parse_args()
return args
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
try:
if args.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowerCAmelCase__ )
tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase__ )
return tpu
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = 0
for file in file_list:
lowercase = file.split("""/""" )[-1]
lowercase = re.search(R"""-\d+-(\d+)\.tfrecord""" , lowerCAmelCase__ ).group(1 )
lowercase = int(lowerCAmelCase__ )
num_samples += sample_count
return num_samples
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=None ) -> List[Any]:
'''simple docstring'''
lowercase = count_samples(lowerCAmelCase__ )
lowercase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ )
if shuffle:
lowercase = dataset.shuffle(len(lowerCAmelCase__ ) )
lowercase = tf.data.TFRecordDataset(lowerCAmelCase__ , num_parallel_reads=lowerCAmelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase = dataset.shuffle(args.shuffle_buffer_size )
lowercase = dataset.batch(lowerCAmelCase__ , drop_remainder=lowerCAmelCase__ )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
lowercase = dataset.prefetch(lowerCAmelCase__ )
return dataset
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
if not args.no_tpu:
lowercase = initialize_tpu(lowerCAmelCase__ )
lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ )
else:
lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase = tokenizer.vocab_size
lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
lowercase = count_samples(lowerCAmelCase__ )
lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase = create_optimizer(
num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase__ , metrics=["""accuracy"""] )
def decode_fn(lowerCAmelCase__ :Any ):
        lowercase = {
            """input_ids""": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            """attention_mask""": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors="""tf""" )
def mask_with_collator(lowerCAmelCase__ :Dict ):
# TF really needs an isin() function
lowercase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
lowercase , lowercase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowerCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase__ , )
return batch
lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
lowercase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
model.fit(
lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =parse_args()
main(args)
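    # Example invocation of this training script (the script name, bucket paths
    # and output directory below are placeholders):
    #   python train_mlm_tpu.py \
    #       --tokenizer unigram-tokenizer-wikitext \
    #       --pretrained_model_config roberta-base \
    #       --train_dataset gs://my-bucket/train \
    #       --eval_dataset gs://my-bucket/eval \
    #       --output_dir ./mlm-checkpoints \
    #       --bfloat16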
| 32
| 1
|
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock_timeout(tmpdir):
    '''simple docstring'''
    locka = FileLock(str(tmpdir / """foo.lock""" ) )
    lockb = FileLock(str(tmpdir / """foo.lock""" ) )
    timeout = 0.01
    with locka.acquire():
        with pytest.raises(Timeout ):
            _start = time.time()
            lockb.acquire(timeout )
        assert time.time() - _start > timeout
def test_long_lock_filename(tmpdir):
    '''simple docstring'''
    filename = """a""" * 1_0_0_0 + """.lock"""
    locka = FileLock(str(tmpdir / filename ) )
    assert locka._lock_file.endswith(""".lock""" )
    assert not locka._lock_file.endswith(filename )
    assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
    lockb = FileLock(tmpdir / filename )
    with locka.acquire():
        with pytest.raises(Timeout ):
            lockb.acquire(0 )
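# Note (an illustrative addition): a FileLock object is reentrant, so
# re-acquiring the *same* instance does not block; only a second instance on
# the same path, as above, hits the Timeout.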
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase : List[Any] ={
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
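# Illustrative note: with a lazy module like this, importing the package stays
# cheap, and torch-dependent symbols such as SwiftFormerModel only trigger the
# heavy import on first attribute access.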
| 32
| 1
|
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ="""T5Config"""
def UpperCAmelCase__ ( lowerCAmelCase__ :jnp.array , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> jnp.ndarray:
'''simple docstring'''
lowercase = jnp.zeros_like(lowerCAmelCase__ )
lowercase = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowercase = shifted_input_ids.at[:, 0].set(lowerCAmelCase__ )
lowercase = jnp.where(shifted_input_ids == -1_0_0 , lowerCAmelCase__ , lowerCAmelCase__ )
return shifted_input_ids
class _A ( lowerCAmelCase ):
snake_case__ : List[str] = 'mt5'
snake_case__ : List[str] = MTaConfig
class _A ( lowerCAmelCase ):
snake_case__ : Tuple = 'mt5'
snake_case__ : Optional[int] = MTaConfig
class _A ( lowerCAmelCase ):
snake_case__ : Tuple = 'mt5'
snake_case__ : Optional[int] = MTaConfig
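# A worked example of the right-shift defined above (names and argument order
# are illustrative assumptions): tokens move one slot to the right, position 0
# becomes the decoder start token, and -100 label placeholders are replaced by
# the pad token afterwards.
#
#     shift_tokens_right(jnp.array([[5, -100, 6]]), pad_token_id=1,
#                        decoder_start_token_id=0)  ->  [[0, 5, 1]]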
| 32
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'mask2former'
snake_case__ : Union[str, Any] = ['swin']
snake_case__ : Any = {'hidden_size': 'hidden_dim'}
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 1024 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 6 , __lowerCAmelCase = 10 , __lowerCAmelCase = 8 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 2048 , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 4 , __lowerCAmelCase = 255 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1_2544 , __lowerCAmelCase = 3.0 , __lowerCAmelCase = 0.7_5 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = [4, 8, 16, 32] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
lowercase = backbone_config
lowercase = feature_size
lowercase = mask_feature_size
lowercase = hidden_dim
lowercase = encoder_feedforward_dim
lowercase = activation_function
lowercase = encoder_layers
lowercase = decoder_layers
lowercase = num_attention_heads
lowercase = dropout
lowercase = dim_feedforward
lowercase = pre_norm
lowercase = enforce_input_projection
lowercase = common_stride
lowercase = ignore_value
lowercase = num_queries
lowercase = no_object_weight
lowercase = class_weight
lowercase = mask_weight
lowercase = dice_weight
lowercase = train_num_points
lowercase = oversample_ratio
lowercase = importance_sample_ratio
lowercase = init_std
lowercase = init_xavier_std
lowercase = use_auxiliary_loss
lowercase = feature_strides
lowercase = output_auxiliary_logits
lowercase = decoder_layers
super().__init__(**__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return cls(
backbone_config=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
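# A minimal usage sketch (an addition, assuming this class mirrors
# transformers' Mask2FormerConfig):
#
#     from transformers import Mask2FormerConfig
#     config = Mask2FormerConfig()  # defaults to a Swin backbone
#     assert config.backbone_config.model_type == "swin"
#     assert config.num_queries == 100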
| 32
| 1
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _A ( lowerCAmelCase , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
snake_case__ : List[Any] = 'ssube/stable-diffusion-x4-upscaler-onnx'
def A__ ( self , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, 3, 128, 128) , rng=random.Random(__lowerCAmelCase ) )
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def A__ ( self ):
"""simple docstring"""
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A__ ( self ):
"""simple docstring"""
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A__ ( self ):
"""simple docstring"""
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def A__ ( self ):
"""simple docstring"""
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowercase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs()
lowercase = pipe(**__lowerCAmelCase ).images
lowercase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _A ( unittest.TestCase ):
@property
def A__ ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A__ ( self ):
"""simple docstring"""
lowercase = ort.SessionOptions()
lowercase = False
return options
def A__ ( self ):
"""simple docstring"""
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowercase = init_image.resize((128, 128) )
# using the PNDM scheduler by default
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A fantasy landscape, trending on artstation"""
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=__lowerCAmelCase , output_type="""np""" , )
lowercase = output.images
lowercase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowercase = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def A__ ( self ):
"""simple docstring"""
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowercase = init_image.resize((128, 128) )
lowercase = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
lowercase = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__lowerCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A fantasy landscape, trending on artstation"""
lowercase = torch.manual_seed(0 )
lowercase = pipe(
prompt=__lowerCAmelCase , image=__lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=__lowerCAmelCase , output_type="""np""" , )
lowercase = output.images
lowercase = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowercase = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 32
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = s.rsplit(lowerCAmelCase__ , lowerCAmelCase__ )
return new.join(lowerCAmelCase__ )
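# Worked example of the right-replace helper above (illustrative): only the
# last `count` occurrences are replaced, e.g.
#   rreplace("blocks.w.w", ".w", ".weight", 1) -> "blocks.w.weight"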
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = {}
lowercase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowercase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowercase = rreplace(lowerCAmelCase__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowercase = rreplace(lowerCAmelCase__ , """.b""" , """.bias""" , 1 )
lowercase = value.float()
return upgrade
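# A minimal sketch of the key-renaming rules applied above, run on a toy
# state dict (the tensor value is a stand-in; only the key rewriting is
# illustrated, and the suffix handling uses endswith instead of rreplace):
import torch as _torch

def _demo_upgrade(state_dict):
    upgraded = {}
    for key, value in state_dict.items():
        for group_key in ("group_1", "group_2", "group_3", "group_4"):
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = key[: -len(".w")] + ".weight"
        if key.endswith(".b"):
            key = key[: -len(".b")] + ".bias"
        upgraded[key] = value.float()
    return upgraded

# "blocks.group_1.res_path.conv.w" -> "blocks.group_1.group.res_path.path.conv.weight"
assert "blocks.group_1.group.res_path.path.conv.weight" in _demo_upgrade(
    {"blocks.group_1.res_path.conv.w": _torch.zeros(1)} )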
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=True ) -> Any:
'''simple docstring'''
from dall_e import Encoder
lowercase = Encoder()
if os.path.exists(lowerCAmelCase__ ):
lowercase = torch.load(lowerCAmelCase__ )
else:
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase__ )
if config_path is not None:
lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = FlavaImageCodebookConfig()
lowercase = FlavaImageCodebook(lowerCAmelCase__ ).eval()
lowercase = encoder.state_dict()
lowercase = upgrade_state_dict(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
lowercase = hf_model.state_dict()
lowercase = count_parameters(lowerCAmelCase__ )
lowercase = count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase : Any =parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray:
'''simple docstring'''
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
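# Quick sanity check of the rotation above, written standalone so it does not
# depend on the renamed function: rotating the unit x-vector by 90 degrees
# should give the unit y-vector (theta = 90 -> matrix [[0, -1], [1, 0]]).
_theta = numpy.radians(90)
_c, _s = numpy.cos(_theta), numpy.sin(_theta)
assert numpy.allclose(numpy.array(((_c, -_s), (_s, _c))) @ numpy.array([1, 0]), [0, 1])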
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 32
|
"""simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def UpperCAmelCase__ ( ) -> Generator[int, None, None]:
'''simple docstring'''
lowercase = {}
lowercase = 2
while True:
lowercase = factor_map.pop(lowerCAmelCase__ , lowerCAmelCase__ )
if factor:
lowercase = factor + prime
while x in factor_map:
x += factor
lowercase = factor
else:
lowercase = prime
yield prime
prime += 1
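# Usage sketch: the generator above is an incremental (dict-based) Eratosthenes
# sieve that yields primes lazily and in order. Its original name is assumed
# to be `sieve`, so the direct call is left as a comment; the standalone
# trial-division check below states the expected output.
def _is_prime(n: int) -> bool:
    return n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))

assert [p for p in range(2, 30) if _is_prime(p)] == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
# from itertools import islice
# assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]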
def UpperCAmelCase__ ( lowerCAmelCase__ :float = 1e10 ) -> int:
'''simple docstring'''
lowercase = sieve()
lowercase = 1
while True:
lowercase = next(lowerCAmelCase__ )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the remainder will be 2.
next(lowerCAmelCase__ )
n += 2
if __name__ == "__main__":
print(solution())
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""only integers accepted as input""" )
else:
lowercase = str(abs(lowerCAmelCase__ ) )
lowercase = [list(lowerCAmelCase__ ) for char in range(len(lowerCAmelCase__ ) )]
for index in range(len(lowerCAmelCase__ ) ):
num_transpositions[index].pop(lowerCAmelCase__ )
return max(
int("""""".join(list(lowerCAmelCase__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 32
| 1
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[int] ) -> float:
'''simple docstring'''
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
lowercase = sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(lowerCAmelCase__ )
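# Worked example: for [1, 2, 3, 4] the mean is 2.5, the absolute deviations
# are 1.5, 0.5, 0.5 and 1.5, and their mean is 4.0 / 4 = 1.0.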
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray:
'''simple docstring'''
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 32
| 1
|
"""simple docstring"""
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__lowerCAmelCase : Any ="""http://www.mocksite.com/file1.txt"""
__lowerCAmelCase : int ="""\"text\": [\"foo\", \"foo\"]"""
__lowerCAmelCase : Tuple ="""6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"""
class _A :
snake_case__ : List[Any] = 200
snake_case__ : Any = {'Content-Length': '100'}
snake_case__ : Optional[int] = {}
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
return [bytes(__lowerCAmelCase , """utf-8""" )]
def UpperCAmelCase__ ( *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :List[str] ) -> List[str]:
'''simple docstring'''
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict] )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
import requests
monkeypatch.setattr(lowerCAmelCase__ , """request""" , lowerCAmelCase__ )
lowercase = URL
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = url
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = [url]
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = {"""train""": url}
lowercase = """dummy"""
lowercase = """downloads"""
lowercase = tmp_path
lowercase = DownloadConfig(
cache_dir=os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , use_etag=lowerCAmelCase__ , )
lowercase = DownloadManager(dataset_name=lowerCAmelCase__ , download_config=lowerCAmelCase__ )
lowercase = dl_manager.download(lowerCAmelCase__ )
lowercase = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = [downloaded_paths]
lowercase = [urls]
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
assert "train" in downloaded_paths.keys()
lowercase = downloaded_paths.values()
lowercase = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
lowercase = Path(lowerCAmelCase__ )
lowercase = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
lowercase = downloaded_path.read_text()
assert content == CONTENT
lowercase = downloaded_path.with_suffix(""".json""" )
assert metadata_downloaded_path.exists()
lowercase = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
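# The assertions above pin down the cache layout: each download lands at
# <cache_dir>/<filename derived from the url hash> (HASH is that name for URL,
# since use_etag=False), with a sidecar <same name>.json recording the source
# url and its etag.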
@pytest.mark.parametrize("""paths_type""" , [str, list, dict] )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = str(lowerCAmelCase__ )
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = filename
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = [filename]
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = {"""train""": filename}
lowercase = """dummy"""
lowercase = xz_file.parent
lowercase = """extracted"""
lowercase = DownloadConfig(
cache_dir=lowerCAmelCase__ , use_etag=lowerCAmelCase__ , )
lowercase = DownloadManager(dataset_name=lowerCAmelCase__ , download_config=lowerCAmelCase__ )
lowercase = dl_manager.extract(lowerCAmelCase__ )
lowercase = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = [extracted_paths]
lowercase = [paths]
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
assert "train" in extracted_paths.keys()
lowercase = extracted_paths.values()
lowercase = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
assert extracted_path == dl_manager.extracted_paths[input_path]
lowercase = Path(lowerCAmelCase__ )
lowercase = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCAmelCase__ , etag=lowerCAmelCase__ )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
lowercase = extracted_path.read_text()
lowercase = text_file.read_text()
assert extracted_file_content == expected_file_content
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
assert path.endswith(""".jsonl""" )
for num_items, line in enumerate(lowerCAmelCase__ , start=1 ):
lowercase = json.loads(line.decode("""utf-8""" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""] )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict ) -> Tuple:
'''simple docstring'''
lowercase = request.getfixturevalue(lowerCAmelCase__ )
lowercase = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) , start=1 ):
_test_jsonl(lowerCAmelCase__ , lowerCAmelCase__ )
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""] )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple ) -> Optional[Any]:
'''simple docstring'''
lowercase = request.getfixturevalue(lowerCAmelCase__ )
lowercase = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCAmelCase__ ) , start=1 ):
_test_jsonl(lowerCAmelCase__ , lowerCAmelCase__ )
assert num_tar == 1
assert num_jsonl == 2
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCAmelCase__ ) , start=1 ):
assert os.path.basename(lowerCAmelCase__ ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = credit_card_number
lowercase = 0
lowercase = len(lowerCAmelCase__ ) - 2
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
lowercase = int(cc_number[i] )
digit *= 2
# If doubling a digit results in a two-digit number,
# i.e., greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
# to get a single-digit number.
if digit > 9:
digit %= 1_0
digit += 1
lowercase = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
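# Worked Luhn example on the document's own test number 4111111111111111:
# doubling every second digit from the right gives 8, 2, 2, 2, 2, 2, 2, 2
# (sum 22), the untouched digits are eight 1s (sum 8), and 22 + 8 = 30 is
# divisible by 10, so the check passes.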
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(lowerCAmelCase__ ) <= 1_6:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 32
| 1
|
"""simple docstring"""
__lowerCAmelCase : Dict ={
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
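# A minimal sketch of how a pin table like this is typically consumed when
# generating install requirements (the helper below is illustrative, not part
# of this file):
def _deps_list(deps: dict, *pkgs: str) -> list:
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]

# e.g. _deps_list(table, "torch", "transformers")
# -> ["torch>=1.4", "transformers>=4.25.1"]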
| 32
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
def extract(*__lowerCAmelCase , **__lowerCAmelCase ):
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = torch.ones([0] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
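# The stub returned above mimics just enough of a feature extractor for
# these tests: an object exposing `pixel_values` and a chainable `.to()`;
# it never computes real features.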
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# note: this test uses the DDIM scheduler, so there are no PRK steps to skip
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(pipe.scheduler , __lowerCAmelCase )
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowercase = 40_0366_0346
lowercase = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowercase = 27_3497_1755
lowercase = 7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowercase = 10_4435_5234
lowercase = 12
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
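# Recap of the safe-latent-diffusion knobs used throughout these tests:
# sld_guidance_scale=0 disables the safety guidance entirely, while the strong
# configuration enables it with sld_guidance_scale=2000, sld_warmup_steps=7,
# sld_threshold=0.025, sld_momentum_scale=0.5 and sld_mom_beta=0.7.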
| 32
| 1
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _A ( lowerCAmelCase ):
snake_case__ : UNetaDModel
snake_case__ : KarrasVeScheduler
def __init__( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self , __lowerCAmelCase = 1 , __lowerCAmelCase = 50 , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = self.unet.config.sample_size
lowercase = (batch_size, 3, img_size, img_size)
lowercase = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
lowercase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(__lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
lowercase = self.scheduler.schedule[t]
lowercase = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
lowercase , lowercase = self.scheduler.add_noise_to_input(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
lowercase = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
lowercase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
lowercase = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
lowercase = self.scheduler.step_correct(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , step_output.prev_sample , step_output["""derivative"""] , )
lowercase = step_output.prev_sample
lowercase = (sample / 2 + 0.5).clamp(0 , 1 )
lowercase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase )
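# A hedged usage sketch (upstream this class is KarrasVePipeline; the model id
# is illustrative of an NCSN++ checkpoint compatible with Karras-Ve sampling):
#
#   from diffusers import KarrasVePipeline
#   pipe = KarrasVePipeline.from_pretrained("google/ncsnpp-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("karras_ve_sample.png")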
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract each remaining row from the first row to cancel the leading term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
# If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 32
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowerCAmelCase : Dict ={"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class _A ( unittest.TestCase ):
snake_case__ : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
snake_case__ : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
snake_case__ : Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
snake_case__ : Tuple = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
lowercase = text_classifier("""This is great !""" , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}] )
lowercase = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
lowercase = text_classifier("""This is great !""" , top_k=1 )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
# Legacy behavior
lowercase = text_classifier("""This is great !""" , return_all_scores=__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
lowercase = text_classifier("""This is great !""" , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}]] )
lowercase = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}],
] , )
lowercase = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
{"""label""": """LABEL_0""", """score""": 0.5_0_4},
] , )
@require_torch
def A__ ( self ):
"""simple docstring"""
import torch
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline(
task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] )
@slow
@require_torch
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline("""text-classification""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
lowercase = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
lowercase = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
@slow
@require_tf
def A__ ( self ):
"""simple docstring"""
lowercase = pipeline("""text-classification""" , framework="""tf""" )
lowercase = text_classifier("""This is great !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 1.0}] )
lowercase = text_classifier("""This is bad !""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] )
lowercase = text_classifier("""Birds are a type of animal""" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = TextClassificationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
lowercase = """HuggingFace is in"""
lowercase = text_classifier(__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) , [{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
lowercase = ["""HuggingFace is in """, """Paris is in France"""]
lowercase = text_classifier(__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}, {"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
lowercase = text_classifier(__lowerCAmelCase , top_k=__lowerCAmelCase )
lowercase = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [[{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] * N, [{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] * N] , )
lowercase = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""}
lowercase = text_classifier(__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , {"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )} , )
self.assertTrue(outputs["""label"""] in model.config.idalabel.values() )
# This might be used as a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
lowercase = [["""HuggingFace is in """, """Paris is in France"""]]
with self.assertRaises(__lowerCAmelCase ):
text_classifier(__lowerCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
lowercase = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , [{"""label""": ANY(__lowerCAmelCase ), """score""": ANY(__lowerCAmelCase )}] , )
self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
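# A short recap of the output shapes exercised above, as a hedged sketch:
# classifier = pipeline("text-classification", model="hf-internal-testing/tiny-random-distilbert")
# classifier("This is great !")                          # -> [{"label": ..., "score": ...}]
# classifier("This is great !", top_k=None)              # all labels, non-legacy format
# classifier("This is great !", return_all_scores=True)  # legacy: extra list nesting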
| 32
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = {}
if top_k is not None:
lowercase = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = load_image(__lowerCAmelCase )
lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.model(**__lowerCAmelCase )
return model_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase = probs.topk(__lowerCAmelCase )
elif self.framework == "tf":
lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase )
lowercase , lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
| 32
| 1
|
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] ) -> str:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase = JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any] ) -> Any:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowercase = features.copy() if features else default_expected_features
lowercase = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase = JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
lowercase = features.copy() if features else default_expected_features
lowercase = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase = JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] ) -> str:
'''simple docstring'''
lowercase = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
lowercase = features.copy()
lowercase = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase = tmp_path / """cache"""
lowercase = JsonDatasetReader(lowerCAmelCase__ , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] ) -> Any:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowercase = JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , split=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Optional[Any] ) -> List[Any]:
'''simple docstring'''
if issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = jsonl_path
elif issubclass(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = [jsonl_path]
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowercase = JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_dataset(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str=("train",) ) -> Tuple:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
for split in splits:
lowercase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowercase = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] ) -> List[str]:
'''simple docstring'''
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowercase = features.copy() if features else default_expected_features
lowercase = (
Features({feature: Value(lowerCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
lowercase = JsonDatasetReader({"""train""": jsonl_path} , features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Tuple ) -> Optional[Any]:
'''simple docstring'''
if split:
lowercase = {split: jsonl_path}
else:
lowercase = """train"""
lowercase = {"""train""": jsonl_path, """test""": jsonl_path}
lowercase = tmp_path / """cache"""
lowercase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
lowercase = JsonDatasetReader(lowerCAmelCase__ , cache_dir=lowerCAmelCase__ ).read()
_check_json_datasetdict(lowerCAmelCase__ , lowerCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] ) -> Optional[Any]:
'''simple docstring'''
return json.load(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] ) -> int:
'''simple docstring'''
return [json.loads(lowerCAmelCase__ ) for line in buffer]
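# Note (editor): the two loaders above mirror the writer's `lines` flag -
# lines=True emits one JSON object per line (read back with the decoded name
# load_json_lines), lines=False a single JSON document (read back with load_json).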
class _A :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , lines=__lowerCAmelCase ).write()
buffer.seek(0 )
lowercase = load_json_function(__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(exported_content[0] , __lowerCAmelCase )
assert len(__lowerCAmelCase ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , lines=__lowerCAmelCase , orient=__lowerCAmelCase ).write()
buffer.seek(0 )
lowercase = load_json(__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCAmelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__lowerCAmelCase ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , lines=__lowerCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
lowercase = load_json_function(__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(exported_content[0] , __lowerCAmelCase )
assert len(__lowerCAmelCase ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , lines=__lowerCAmelCase , orient=__lowerCAmelCase , num_proc=2 ).write()
buffer.seek(0 )
lowercase = load_json(__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(__lowerCAmelCase , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(__lowerCAmelCase ) == 10
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
with pytest.raises(__lowerCAmelCase ):
with io.BytesIO() as buffer:
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = tmp_path_factory.mktemp("""data""" ) / f'test.json.{extension}'
lowercase = str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(__lowerCAmelCase , __lowerCAmelCase , compression=__lowerCAmelCase ).write()
with fsspec.open(__lowerCAmelCase , """rb""" , compression="""infer""" ) as f:
lowercase = f.read()
with fsspec.open(__lowerCAmelCase , """rb""" , compression="""infer""" ) as f:
lowercase = f.read()
assert exported_content == original_content
| 32
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
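# Editor's note - a minimal migration sketch, not part of the original script;
# the checkpoint id is illustrative only, and init_image stands for any PIL image:
#
# from diffusers import StableDiffusionImg2ImgPipeline
# pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# out = pipe(prompt="a fantasy landscape", image=init_image, strength=0.75)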
| 32
| 1
|
"""simple docstring"""
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowerCAmelCase : List[str] =logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCAmelCase : Union[str, Any] =2_5_6
class _A ( lowerCAmelCase ):
snake_case__ : Union[str, Any] = ['melgan']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
# From MELGAN
lowercase = math.log(1E-5 ) # Matches MelGAN training.
lowercase = 4.0 # Largest value for most examples
lowercase = 128
self.register_modules(
notes_encoder=__lowerCAmelCase , continuous_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase , scheduler=__lowerCAmelCase , melgan=__lowerCAmelCase , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=(-1.0, 1.0) , __lowerCAmelCase=False ):
"""simple docstring"""
lowercase , lowercase = output_range
if clip:
lowercase = torch.clip(__lowerCAmelCase , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=(-1.0, 1.0) , __lowerCAmelCase=False ):
"""simple docstring"""
lowercase , lowercase = input_range
lowercase = torch.clip(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) if clip else outputs
# Scale to [0, 1].
lowercase = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
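    # Note (editor): the two methods above are inverses of each other up to
    # clipping - scale_features (decoded name) maps raw features from
    # [min_value, max_value] into output_range, and scale_to_features maps
    # model outputs in input_range back to [min_value, max_value].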
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = input_tokens > 0
lowercase , lowercase = self.notes_encoder(
encoder_input_tokens=__lowerCAmelCase , encoder_inputs_mask=__lowerCAmelCase )
lowercase , lowercase = self.continuous_encoder(
encoder_inputs=__lowerCAmelCase , encoder_inputs_mask=__lowerCAmelCase )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = noise_time
if not torch.is_tensor(__lowerCAmelCase ):
lowercase = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__lowerCAmelCase ) and len(timesteps.shape ) == 0:
lowercase = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase = self.decoder(
encodings_and_masks=__lowerCAmelCase , decoder_input_tokens=__lowerCAmelCase , decoder_noise_time=__lowerCAmelCase )
return logits
@torch.no_grad()
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 100 , __lowerCAmelCase = True , __lowerCAmelCase = "numpy" , __lowerCAmelCase = None , __lowerCAmelCase = 1 , ):
"""simple docstring"""
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__lowerCAmelCase )}.' )
lowercase = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device )
for i, encoder_input_tokens in enumerate(__lowerCAmelCase ):
if i == 0:
lowercase = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__lowerCAmelCase , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase = ones
lowercase = self.scale_features(
__lowerCAmelCase , output_range=[-1.0, 1.0] , clip=__lowerCAmelCase )
lowercase = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__lowerCAmelCase , continuous_mask=__lowerCAmelCase , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__lowerCAmelCase , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__lowerCAmelCase )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase = self.decode(
encodings_and_masks=__lowerCAmelCase , input_tokens=__lowerCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
lowercase = self.scale_to_features(__lowerCAmelCase , input_range=[-1.0, 1.0] )
lowercase = mel[:1]
lowercase = mel.cpu().float().numpy()
lowercase = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase )
logger.info("""Generated segment""" , __lowerCAmelCase )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"""Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.""" )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"""Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.""" )
if output_type == "numpy":
lowercase = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__lowerCAmelCase )
| 32
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = GPTSanJapaneseTokenizer
snake_case__ : int = False
snake_case__ : Tuple = {'do_clean_text': False, 'add_prefix_space': False}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowercase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowercase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.get_input_output_texts(__lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
| 32
| 1
|
"""simple docstring"""
import itertools
import string
from collections.abc import Generator, Iterable
def UpperCAmelCase__ ( lowerCAmelCase__ :Iterable[str] , lowerCAmelCase__ :int ) -> Generator[tuple[str, ...], None, None]:
'''simple docstring'''
lowercase = iter(lowerCAmelCase__ )
while True:
lowercase = tuple(itertools.islice(lowerCAmelCase__ , lowerCAmelCase__ ) )
if not chunk:
return
yield chunk
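# e.g., with the decoded name `chunker`: list(chunker("ABCDE", 2)) yields
# [("A", "B"), ("C", "D"), ("E",)] - the final chunk may be shorter than n.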
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = """""".join([c.upper() for c in dirty if c in string.ascii_letters] )
lowercase = """"""
if len(lowerCAmelCase__ ) < 2:
return dirty
for i in range(len(lowerCAmelCase__ ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(lowerCAmelCase__ ) & 1:
clean += "X"
return clean
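# e.g., with the decoded name `prepare_input`: prepare_input("speed") ==
# "SPEXED" - non-letters are dropped, an X is inserted between doubled
# letters, and a trailing X pads odd-length input.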
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> list[str]:
'''simple docstring'''
lowercase = """ABCDEFGHIKLMNOPQRSTUVWXYZ"""
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
lowercase = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(lowerCAmelCase__ )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(lowerCAmelCase__ )
return table
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = generate_table(lowerCAmelCase__ )
lowercase = prepare_input(lowerCAmelCase__ )
lowercase = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowerCAmelCase__ , 2 ):
lowercase , lowercase = divmod(table.index(lowerCAmelCase__ ) , 5 )
lowercase , lowercase = divmod(table.index(lowerCAmelCase__ ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = generate_table(lowerCAmelCase__ )
lowercase = """"""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowerCAmelCase__ , 2 ):
lowercase , lowercase = divmod(table.index(lowerCAmelCase__ ) , 5 )
lowercase , lowercase = divmod(table.index(lowerCAmelCase__ ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
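# Round-trip sketch (editor's note) with the decoded names encode / decode,
# using the classic Wikipedia key; the names are pre-obfuscation assumptions:
#   encode("Hide the gold in the tree stump", "playfair example")
#     -> "BMODZBXDNABEKUDMUIXMMOUVIF"
#   decode("BMODZBXDNABEKUDMUIXMMOUVIF", "playfair example")
#     -> "HIDETHEGOLDINTHETREXESTUMP"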
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] ={"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ViTFeatureExtractor"""]
__lowerCAmelCase : List[str] =["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
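# Pattern note (editor): at import time only the names in _import_structure are
# registered; the torch / tf / flax submodules are loaded lazily on first
# attribute access via _LazyModule, so importing the package does not pull in
# PyTorch until e.g. ViTModel is actually touched.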
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
from typing import Generic, TypeVar
__lowerCAmelCase : Any =TypeVar("""T""")
class _A ( Generic[T] ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = data
lowercase = self
lowercase = 0
class _A ( Generic[T] ):
def __init__( self ):
"""simple docstring"""
lowercase = {}
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = DisjointSetTreeNode(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.map[data]
if elem_ref != elem_ref.parent:
lowercase = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if nodea.rank > nodea.rank:
lowercase = nodea
else:
lowercase = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
self.link(self.find_set(__lowerCAmelCase ) , self.find_set(__lowerCAmelCase ) )
class _A ( Generic[T] ):
def __init__( self ):
"""simple docstring"""
lowercase = {}
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if node not in self.connections:
lowercase = {}
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
self.add_node(__lowerCAmelCase )
self.add_node(__lowerCAmelCase )
lowercase = weight
lowercase = weight
def A__ ( self ):
"""simple docstring"""
lowercase = []
lowercase = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda __lowerCAmelCase : x[2] )
# creating the disjoint set
lowercase = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(__lowerCAmelCase )
# MST generation
lowercase = 0
lowercase = 0
lowercase = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
lowercase , lowercase , lowercase = edges[index]
index += 1
lowercase = disjoint_set.find_set(__lowerCAmelCase )
lowercase = disjoint_set.find_set(__lowerCAmelCase )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
disjoint_set.union(__lowerCAmelCase , __lowerCAmelCase )
return graph
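# Usage sketch (editor's note) with the decoded names DisjointSetTree /
# GraphUndirectedWeighted; kruskal() is the decoded name of the MST method
# defined last above:
#   g = GraphUndirectedWeighted[int]()
#   g.add_edge(1, 2, 1)
#   g.add_edge(2, 3, 2)
#   g.add_edge(1, 3, 10)
#   mst = g.kruskal()  # keeps (1, 2) and (2, 3); the weight-10 edge is skipped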
| 32
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
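# Usage sketch (editor's note; class name per the original `datasets` reader,
# the DataFrame below is illustrative):
#   import pyspark.sql
#   spark = pyspark.sql.SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([("a", 1), ("b", 2)], ["col_1", "col_2"])
#   ds = SparkDatasetReader(df, cache_dir="/tmp/cache").read()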
| 32
| 1
|
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowerCAmelCase : Union[str, Any] =HfArgumentParser(InitializationArguments)
__lowerCAmelCase : Dict =parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowerCAmelCase : Optional[int] =AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowerCAmelCase : Union[str, Any] ={
"""vocab_size""": len(tokenizer),
"""scale_attn_by_inverse_layer_idx""": True,
"""reorder_and_upcast_attn""": True,
}
# Load model config (GPT-2 large in this case)
__lowerCAmelCase : Union[str, Any] =AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowerCAmelCase : Optional[int] =AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
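# Usage sketch (editor's note): the flag names mirror the `args.*` attributes
# read above; the script filename and exact flag syntax are assumptions:
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name codeparrot-model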
| 32
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(lowerCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCAmelCase__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 32
| 1
|
"""simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 32
|
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
lowercase = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase = in_proj_weight[
-encoder_config.hidden_size :, :
]
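# Note (editor): the fused qkv projection of shape (3 * hidden_size, hidden_size)
# is split into equal thirds along dim 0 - query, key, value - and each third
# is assigned to the matching HF self-attention weight.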
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
lowercase = dct.pop(lowerCAmelCase__ )
lowercase = val
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
if "handwritten" in checkpoint_url:
lowercase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase__ )
lowercase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase = 1_0_2_4
lowercase = 4_0_9_6
lowercase = 2_4
lowercase = 1_6
lowercase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = False
lowercase = """relu"""
lowercase = 1_0_2_4
lowercase = True
lowercase = False
lowercase = False
# load HuggingFace model
lowercase = ViTModel(lowerCAmelCase__ , add_pooling_layer=lowerCAmelCase__ )
lowercase = TrOCRForCausalLM(lowerCAmelCase__ )
lowercase = VisionEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" , check_hash=lowerCAmelCase__ )["""model"""]
lowercase = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase = state_dict.pop(lowerCAmelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowercase = val
else:
lowercase = val
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image
lowercase = ViTImageProcessor(size=encoder_config.image_size )
lowercase = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowercase = TrOCRProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = processor(images=prepare_img(lowerCAmelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowercase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase = model(pixel_values=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
lowercase = outputs.logits
lowercase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
lowercase = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
lowercase = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase__ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__lowerCAmelCase : Dict =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 32
| 1
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = 0
if start < end:
lowercase = randint(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = a[end]
lowercase = a[pivot]
lowercase = temp
lowercase , lowercase = _in_place_partition(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
count += _in_place_quick_sort(lowerCAmelCase__ , lowerCAmelCase__ , p - 1 )
count += _in_place_quick_sort(lowerCAmelCase__ , p + 1 , lowerCAmelCase__ )
return count
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase = 0
lowercase = randint(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = a[end]
lowercase = a[pivot]
lowercase = temp
lowercase = start - 1
for index in range(lowerCAmelCase__ , lowerCAmelCase__ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowercase = new_pivot_index + 1
lowercase = a[new_pivot_index]
lowercase = a[index]
lowercase = temp
lowercase = a[new_pivot_index + 1]
lowercase = a[end]
lowercase = temp
return new_pivot_index + 1, count
__lowerCAmelCase : List[Any] =TemporaryFile()
__lowerCAmelCase : int =1_0_0 # 100 elements are to be sorted
__lowerCAmelCase , __lowerCAmelCase : int =0, 1 # mean and standard deviation
__lowerCAmelCase : str =np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)
outfile.seek(0) # using the same array
__lowerCAmelCase : Union[str, Any] =np.load(outfile)
__lowerCAmelCase : int =len(M) - 1
__lowerCAmelCase : List[str] =_in_place_quick_sort(M, 0, r)
print(
"""No of Comparisons for 100 elements selected from a standard normal distribution"""
"""is :"""
)
print(z)
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
    # round the floating-point cube root: 27 ** (1 / 3) evaluates to
    # 3.0000000000000004, so cubing the raw root wrongly reports False
    # for true perfect cubes
    lowercase = round(n ** (1 / 3) )
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 32
| 1
|
"""simple docstring"""
from collections import defaultdict
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
lowercase = 1
lowercase = True
for v in tree[start]:
if v not in visited:
ret += dfs(lowerCAmelCase__ )
if ret % 2 == 0:
cuts.append(lowerCAmelCase__ )
return ret
def UpperCAmelCase__ ( ) -> Optional[int]:
'''simple docstring'''
dfs(1 )
if __name__ == "__main__":
__lowerCAmelCase , __lowerCAmelCase : List[str] =1_0, 9
__lowerCAmelCase : Union[str, Any] =defaultdict(list)
__lowerCAmelCase : dict[int, bool] ={}
__lowerCAmelCase : list[int] =[]
__lowerCAmelCase : Optional[int] =0
__lowerCAmelCase : Any =[(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (1_0, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
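# For the 10-node tree above the program prints 2: the subtrees rooted at
# nodes 3 (size 2) and 6 (size 4) are even, so the edges (1, 3) and (1, 6)
# can be removed; dfs(1) returns 10 (also even), hence the final
# `len(cuts) - 1`.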
| 32
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : str = KandinskyInpaintPipeline
snake_case__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ : Optional[int] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ : Dict = False
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A__ ( self ):
"""simple docstring"""
return 100
@property
def A__ ( self ):
"""simple docstring"""
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(__lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
            # out_channels is double in_channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def A__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
lowercase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 32
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
__lowerCAmelCase : Any ={
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class _A ( lowerCAmelCase ):
snake_case__ : List[str] = 'tapas'
def __init__( self , __lowerCAmelCase=3_0522 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=1024 , __lowerCAmelCase=[3, 256, 256, 2, 256, 256, 10] , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=0 , __lowerCAmelCase=1_0.0 , __lowerCAmelCase=0 , __lowerCAmelCase=1.0 , __lowerCAmelCase=None , __lowerCAmelCase=1.0 , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=1.0 , __lowerCAmelCase=1.0 , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase="ratio" , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=64 , __lowerCAmelCase=32 , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=True , __lowerCAmelCase=False , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , **__lowerCAmelCase )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_sizes
lowercase = initializer_range
lowercase = layer_norm_eps
# Fine-tuning task hyperparameters
lowercase = positive_label_weight
lowercase = num_aggregation_labels
lowercase = aggregation_loss_weight
lowercase = use_answer_as_supervision
lowercase = answer_loss_importance
lowercase = use_normalized_answer_loss
lowercase = huber_loss_delta
lowercase = temperature
lowercase = aggregation_temperature
lowercase = use_gumbel_for_cells
lowercase = use_gumbel_for_aggregation
lowercase = average_approximation_function
lowercase = cell_selection_preference
lowercase = answer_loss_cutoff
lowercase = max_num_rows
lowercase = max_num_columns
lowercase = average_logits_per_cell
lowercase = select_one_column
lowercase = allow_empty_column_selection
lowercase = init_cell_selection_weights_to_zero
lowercase = reset_position_index_per_cell
lowercase = disable_per_token_loss
# Aggregation hyperparameters
lowercase = aggregation_labels
lowercase = no_aggregation_label_index
if isinstance(self.aggregation_labels , __lowerCAmelCase ):
lowercase = {int(__lowerCAmelCase ): v for k, v in aggregation_labels.items()}
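# Usage sketch (editor's note, decoded class name TapasConfig; the values are
# the documented WTQ-style fine-tuning setup, assumed here for illustration):
#   config = TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True)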
| 32
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 32
| 1
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class _A ( nn.Module ):
snake_case__ : int
snake_case__ : jnp.dtype = jnp.floataa
def A__ ( self ):
"""simple docstring"""
lowercase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase , lowercase , lowercase = hidden_states.shape
lowercase = jax.image.resize(
__lowerCAmelCase , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
lowercase = self.conv(__lowerCAmelCase )
return hidden_states
class _A ( nn.Module ):
snake_case__ : int
snake_case__ : jnp.dtype = jnp.floataa
def A__ ( self ):
"""simple docstring"""
lowercase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.conv(__lowerCAmelCase )
return hidden_states
class _A ( nn.Module ):
snake_case__ : int
snake_case__ : int = None
snake_case__ : float = 0.0
snake_case__ : bool = None
snake_case__ : jnp.dtype = jnp.floataa
def A__ ( self ):
"""simple docstring"""
lowercase = self.in_channels if self.out_channels is None else self.out_channels
lowercase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowercase = nn.Conv(
__lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = nn.Dense(__lowerCAmelCase , dtype=self.dtype )
lowercase = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
lowercase = nn.Dropout(self.dropout_prob )
lowercase = nn.Conv(
__lowerCAmelCase , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowercase = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
lowercase = None
if use_nin_shortcut:
lowercase = nn.Conv(
__lowerCAmelCase , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ):
"""simple docstring"""
lowercase = hidden_states
lowercase = self.norma(__lowerCAmelCase )
lowercase = nn.swish(__lowerCAmelCase )
lowercase = self.conva(__lowerCAmelCase )
lowercase = self.time_emb_proj(nn.swish(__lowerCAmelCase ) )
lowercase = jnp.expand_dims(jnp.expand_dims(__lowerCAmelCase , 1 ) , 1 )
lowercase = hidden_states + temb
lowercase = self.norma(__lowerCAmelCase )
lowercase = nn.swish(__lowerCAmelCase )
lowercase = self.dropout(__lowerCAmelCase , __lowerCAmelCase )
lowercase = self.conva(__lowerCAmelCase )
if self.conv_shortcut is not None:
lowercase = self.conv_shortcut(__lowerCAmelCase )
return hidden_states + residual
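# --- Hypothetical shape check (illustrative only: the three classes above are
# all mangled to _A and their setup/__call__ hooks to A__). By role they are a
# nearest-neighbor 2x upsampler (jax.image.resize + 3x3 conv), a stride-2
# downsampler, and a time-conditioned ResNet block. With NHWC input, the
# upsampler would behave like this in the unmangled original:
#
#   import jax, jax.numpy as jnp
#   up = FlaxUpsample2D(out_channels=8)          # unmangled name, assumed
#   x = jnp.zeros((1, 16, 16, 8))
#   params = up.init(jax.random.PRNGKey(0), x)
#   y = up.apply(params, x)                      # -> shape (1, 32, 32, 8)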
| 32
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowercase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowercase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowercase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowercase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowercase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowercase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowercase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowercase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowercase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowercase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowercase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowercase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowercase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowercase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowercase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowercase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowercase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowercase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowercase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowercase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowercase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowercase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowercase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
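# Worked example of the renaming rules above (the def is mangled, but the call
# site below refers to it as rename_key). Applying the rules in order:
#   "img_encoder.layers.0.blocks.0.attn.proj.weight"
#   -> "vision_model.encoder.stages.0.blocks.0.attn.proj.weight"            (img_encoder.layers)
#   -> "vision_model.encoder.stages.0.layers.0.attn.proj.weight"            (blocks -> layers)
#   -> "vision_model.encoder.stages.0.layers.0.self_attn.proj.weight"       (attn -> self_attn)
#   -> "vision_model.encoder.stages.0.layers.0.self_attn.out_proj.weight"   (proj -> out_proj)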
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of the vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase , lowercase = int(key_split[2] ), int(key_split[4] )
lowercase = config.vision_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of the text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.text_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[
dim : dim * 2, :
]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowercase = val.squeeze_()
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int="groupvit-gcc-yfcc" , lowerCAmelCase__ :List[Any]=False ) -> str:
'''simple docstring'''
lowercase = GroupViTConfig()
lowercase = GroupViTModel(lowerCAmelCase__ ).eval()
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowercase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowercase = prepare_img()
lowercase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowercase = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print("""Successfully saved processor and model to""" , lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__lowerCAmelCase : int =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
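# Example invocation of this conversion script (file names are illustrative):
#   python convert_groupvit_checkpoint.py --checkpoint_path ./checkpoint.pth \
#       --pytorch_dump_folder_path ./groupvit --model_name groupvit-gcc-yfcc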
| 32
| 1
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = {}
if top_k is not None:
lowercase = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = load_image(__lowerCAmelCase )
lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.model(**__lowerCAmelCase )
return model_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase = probs.topk(__lowerCAmelCase )
elif self.framework == "tf":
lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase )
lowercase , lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
| 32
|
"""simple docstring"""
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = None
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(sources , int ):
lowercase = [sources]
if isinstance(sinks , int ):
lowercase = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
lowercase = sources[0]
lowercase = sinks[0]
# add a fake vertex if there is more
# than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase = max_input_flow
lowercase = 0
lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase = max_input_flow
lowercase = size - 1
def A__ ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = algorithm(self )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = flow_network
lowercase = flow_network.verticesCount
lowercase = flow_network.sourceIndex
lowercase = flow_network.sinkIndex
# this is just a reference, so you shouldn't mutate
# it in your algorithms; make a deep copy before doing that
lowercase = flow_network.graph
lowercase = False
def A__ ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase = True
def A__ ( self ):
"""simple docstring"""
pass
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
# use this to save your result
lowercase = -1
def A__ ( self ):
"""simple docstring"""
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase = [0] * self.verticies_count
lowercase = [0] * self.verticies_count
def A__ ( self ):
"""simple docstring"""
lowercase = self.verticies_count
# saturate every edge leaving the source with initial preflow
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase = 0
while i < len(__lowerCAmelCase ):
lowercase = vertices_list[i]
lowercase = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, move it to the front
# and restart the scan from index 0
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
lowercase = 0
else:
i += 1
lowercase = sum(self.preflow[self.source_index] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's a neighbour and the current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase = self.heights[to_index]
if min_height is not None:
lowercase = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : List[str] = ['pixel_values']
def __init__( self , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = 0.9 , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = 1 / 255 , __lowerCAmelCase = True , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = size if size is not None else {"""shortest_edge""": 224}
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
lowercase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
lowercase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" )
lowercase = do_resize
lowercase = size
lowercase = crop_pct
lowercase = resample
lowercase = do_center_crop
lowercase = crop_size
lowercase = do_rescale
lowercase = rescale_factor
lowercase = do_normalize
lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = PILImageResampling.BICUBIC , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
lowercase = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
lowercase = int(size["""height"""] / crop_pct )
else:
lowercase = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(__lowerCAmelCase ) )
lowercase = get_resize_output_image_size(__lowerCAmelCase , size=__lowerCAmelCase , default_to_square=__lowerCAmelCase )
else:
if "shortest_edge" in size:
lowercase = get_resize_output_image_size(__lowerCAmelCase , size=size["""shortest_edge"""] , default_to_square=__lowerCAmelCase )
elif "height" in size and "width" in size:
lowercase = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(__lowerCAmelCase ) )
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = get_size_dict(__lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(__lowerCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
return normalize(__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = ChannelDimension.FIRST , **__lowerCAmelCase , ):
"""simple docstring"""
lowercase = do_resize if do_resize is not None else self.do_resize
lowercase = crop_pct if crop_pct is not None else self.crop_pct
lowercase = resample if resample is not None else self.resample
lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase = do_rescale if do_rescale is not None else self.do_rescale
lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase = do_normalize if do_normalize is not None else self.do_normalize
lowercase = image_mean if image_mean is not None else self.image_mean
lowercase = image_std if image_std is not None else self.image_std
lowercase = size if size is not None else self.size
lowercase = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase )
lowercase = crop_size if crop_size is not None else self.crop_size
lowercase = get_size_dict(__lowerCAmelCase , param_name="""crop_size""" )
lowercase = make_list_of_images(__lowerCAmelCase )
if not valid_images(__lowerCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase = [to_numpy_array(__lowerCAmelCase ) for image in images]
if do_resize:
lowercase = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , crop_pct=__lowerCAmelCase , resample=__lowerCAmelCase ) for image in images]
if do_center_crop:
lowercase = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase ) for image in images]
if do_rescale:
lowercase = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase ) for image in images]
if do_normalize:
lowercase = [self.normalize(image=__lowerCAmelCase , mean=__lowerCAmelCase , std=__lowerCAmelCase ) for image in images]
lowercase = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase ) for image in images]
lowercase = {"""pixel_values""": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
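# --- Hypothetical usage sketch (names follow the unmangled original, where the
# large method above is preprocess, dispatched via BaseImageProcessor.__call__;
# the concrete class name here is assumed):
#
#   from PIL import Image
#   processor = ImageProcessor(size={"shortest_edge": 224}, crop_pct=0.9)
#   batch = processor(Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape   # -> (1, 3, 224, 224)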
| 32
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
__lowerCAmelCase : Dict =tf.data.AUTOTUNE
def UpperCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
lowercase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCAmelCase__ , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCAmelCase__ , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
lowercase = parser.parse_args()
return args
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
try:
if args.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowerCAmelCase__ )
tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase__ )
return tpu
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = 0
for file in file_list:
lowercase = file.split("""/""" )[-1]
lowercase = re.search(R"""-\d+-(\d+)\.tfrecord""" , lowerCAmelCase__ ).group(1 )
lowercase = int(lowerCAmelCase__ )
num_samples += sample_count
return num_samples
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=None ) -> List[Any]:
'''simple docstring'''
lowercase = count_samples(lowerCAmelCase__ )
lowercase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ )
if shuffle:
lowercase = dataset.shuffle(len(lowerCAmelCase__ ) )
lowercase = tf.data.TFRecordDataset(lowerCAmelCase__ , num_parallel_reads=lowerCAmelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase = dataset.shuffle(args.shuffle_buffer_size )
lowercase = dataset.batch(lowerCAmelCase__ , drop_remainder=lowerCAmelCase__ )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
lowercase = dataset.prefetch(lowerCAmelCase__ )
return dataset
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
if not args.no_tpu:
lowercase = initialize_tpu(lowerCAmelCase__ )
lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ )
else:
lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase = tokenizer.vocab_size
lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
lowercase = count_samples(lowerCAmelCase__ )
lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase = create_optimizer(
num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase__ , metrics=["""accuracy"""] )
def decode_fn(lowerCAmelCase__ :Any ):
lowercase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors="""tf""" )
def mask_with_collator(lowerCAmelCase__ :Dict ):
# TF really needs an isin() function
lowercase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
lowercase , lowercase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowerCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase__ , )
return batch
lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
lowercase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
model.fit(
lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =parse_args()
main(args)
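# Example invocation (script name and dataset paths are illustrative; gs://
# paths are read directly from Google Cloud Storage as noted above):
#   python train_mlm_tpu.py --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm-checkpoints --bfloat16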
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
__lowerCAmelCase : int ={"""UserAgent""": UserAgent().random}
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> dict:
'''simple docstring'''
lowercase = script.contents[0]
lowercase = json.loads(data[data.find("""{\"config\"""" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = f'https://www.instagram.com/{username}/'
lowercase = self.get_json()
def A__ ( self ):
"""simple docstring"""
lowercase = requests.get(self.url , headers=__lowerCAmelCase ).text
lowercase = BeautifulSoup(__lowerCAmelCase , """html.parser""" ).find_all("""script""" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
"""simple docstring"""
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self ):
"""simple docstring"""
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["username"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["full_name"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["biography"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["business_email"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["external_url"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["edge_followed_by"]["count"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["edge_follow"]["count"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["profile_pic_url_hd"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["is_verified"]
@property
def A__ ( self ):
"""simple docstring"""
return self.user_data["is_private"]
def UpperCAmelCase__ ( lowerCAmelCase__ :str = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
lowercase = InstagramUser(lowerCAmelCase__ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowerCAmelCase__ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Any =InstagramUser("""github""")
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase : List[Any] ={
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
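# The _LazyModule registration above defers the heavy torch import until an
# attribute is first accessed, e.g. (import path illustrative):
#   from transformers.models import swiftformer
#   swiftformer.SwiftFormerConfig   # resolved through _import_structure on demand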
| 32
| 1
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__lowerCAmelCase : Any ={
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class _A ( lowerCAmelCase ):
snake_case__ : str = 'facebook/nllb-200-distilled-600M'
snake_case__ : str = (
'This is a tool that translates text from one language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language of the desired output. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ : Optional[int] = 'translator'
snake_case__ : int = AutoTokenizer
snake_case__ : str = AutoModelForSeqaSeqLM
snake_case__ : List[Any] = LANGUAGE_CODES
snake_case__ : Optional[Any] = ['text', 'text', 'text']
snake_case__ : List[str] = ['text']
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(f'{tgt_lang} is not a supported language.' )
lowercase = self.lang_to_code[src_lang]
lowercase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__lowerCAmelCase , return_tensors="""pt""" , src_lang=__lowerCAmelCase , tgt_lang=__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.model.generate(**__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowerCAmelCase )
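# --- Hypothetical usage sketch (the three A__ methods above are the tool's
# encode/forward/decode hooks in the unmangled original; PipelineTool.__call__
# chains them after loading the tokenizer and model):
#
#   translator = TranslationTool()   # illustrative name for the class above
#   translator("How are you?", src_lang="English", tgt_lang="French")
#   # -> the French translation produced by the NLLB-200 checkpoint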
| 32
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'mask2former'
snake_case__ : Union[str, Any] = ['swin']
snake_case__ : Any = {'hidden_size': 'hidden_dim'}
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 1024 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 6 , __lowerCAmelCase = 10 , __lowerCAmelCase = 8 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 2048 , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 4 , __lowerCAmelCase = 255 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1_2544 , __lowerCAmelCase = 3.0 , __lowerCAmelCase = 0.7_5 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = [4, 8, 16, 32] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
lowercase = backbone_config
lowercase = feature_size
lowercase = mask_feature_size
lowercase = hidden_dim
lowercase = encoder_feedforward_dim
lowercase = activation_function
lowercase = encoder_layers
lowercase = decoder_layers
lowercase = num_attention_heads
lowercase = dropout
lowercase = dim_feedforward
lowercase = pre_norm
lowercase = enforce_input_projection
lowercase = common_stride
lowercase = ignore_value
lowercase = num_queries
lowercase = no_object_weight
lowercase = class_weight
lowercase = mask_weight
lowercase = dice_weight
lowercase = train_num_points
lowercase = oversample_ratio
lowercase = importance_sample_ratio
lowercase = init_std
lowercase = init_xavier_std
lowercase = use_auxiliary_loss
lowercase = feature_strides
lowercase = output_auxiliary_logits
lowercase = decoder_layers
super().__init__(**__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return cls(
backbone_config=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
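# --- Hypothetical usage sketch: constructing the config with no arguments
# falls back to the default Swin backbone described in the logger message.
#
#   config = Mask2FormerConfig()          # illustrative name for the class above
#   config.backbone_config.model_type     # -> "swin"
#   config.num_queries                    # -> 100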
| 32
| 1
|
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
lowercase = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
lowercase , lowercase = get_aligned_output_features_output_indices(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["""c"""] )
self.assertEqual(__lowerCAmelCase , [2] )
# Out indices set to match out features
lowercase , lowercase = get_aligned_output_features_output_indices(["""a""", """c"""] , __lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] )
self.assertEqual(__lowerCAmelCase , [0, 2] )
# Out features set to match out indices
lowercase , lowercase = get_aligned_output_features_output_indices(__lowerCAmelCase , [0, 2] , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] )
self.assertEqual(__lowerCAmelCase , [0, 2] )
# Out features selected from negative indices
lowercase , lowercase = get_aligned_output_features_output_indices(__lowerCAmelCase , [-3, -1] , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , ["""a""", """c"""] )
self.assertEqual(__lowerCAmelCase , [-3, -1] )
def A__ ( self ):
"""simple docstring"""
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , __lowerCAmelCase )
# Out features must be a list
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(("""a""", """b""") , (0, 1) , ["""a""", """b"""] )
# Out features must be a subset of stage names
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 1) , ["""a"""] )
# Out indices must be a list or tuple
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(__lowerCAmelCase , 0 , ["""a""", """b"""] )
# Out indices must be a subset of stage names
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(__lowerCAmelCase , (0, 1) , ["""a"""] )
# Out features and out indices must be the same length
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0,) , ["""a""", """b""", """c"""] )
# Out features should match out indices
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""a""", """b"""] , (0, 2) , ["""a""", """b""", """c"""] )
# Out features and out indices should be in order
with self.assertRaises(__lowerCAmelCase ):
verify_out_features_out_indices(["""b""", """a"""] , (0, 1) , ["""a""", """b"""] )
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )
def A__ ( self ):
"""simple docstring"""
lowercase = BackboneMixin()
lowercase = ["""a""", """b""", """c"""]
lowercase = ["""a""", """c"""]
lowercase = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
lowercase = ["""a""", """b"""]
self.assertEqual(backbone.out_features , ["""a""", """b"""] )
self.assertEqual(backbone.out_indices , [0, 1] )
lowercase = [-3, -1]
self.assertEqual(backbone.out_features , ["""a""", """c"""] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 32
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = s.rsplit(lowerCAmelCase__ , lowerCAmelCase__ )
return new.join(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = {}
lowercase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowercase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowercase = rreplace(lowerCAmelCase__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowercase = rreplace(lowerCAmelCase__ , """.b""" , """.bias""" , 1 )
lowercase = value.float()
return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=True ) -> Any:
'''simple docstring'''
from dall_e import Encoder
lowercase = Encoder()
if os.path.exists(lowerCAmelCase__ ):
lowercase = torch.load(lowerCAmelCase__ )
else:
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase__ )
if config_path is not None:
lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = FlavaImageCodebookConfig()
lowercase = FlavaImageCodebook(lowerCAmelCase__ ).eval()
lowercase = encoder.state_dict()
lowercase = upgrade_state_dict(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
lowercase = hf_model.state_dict()
lowercase = count_parameters(lowerCAmelCase__ )
lowercase = count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase : Any =parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
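# Example invocation (script name and paths are illustrative; a URL may be
# passed as --checkpoint_path, in which case torch.hub downloads the weights):
#   python convert_dalle_to_flava_codebook.py --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava-codebook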
| 32
| 1
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
def extract(*__lowerCAmelCase , **__lowerCAmelCase ):
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = torch.ones([0] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# assemble the pipeline (note: this test uses the DDIM scheduler configured above)
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(pipe.scheduler , __lowerCAmelCase )
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowercase = 40_0366_0346
lowercase = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowercase = 27_3497_1755
lowercase = 7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowercase = 10_4435_5234
lowercase = 12
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
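# Editorial aside (hedged): the slice assertions above compare only a 3x3 corner of
# the last channel against reference values with an absolute tolerance; a minimal
# standalone version of that tolerance check, on hypothetical arrays, is:
assert np.abs(np.zeros(9) - np.full(9, 0.005)).max() < 1E-2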
| 32
|
"""simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
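# Hedged usage sketch for the ANSI helpers above (hypothetical demo name): colour
# output wraps the content in "\u001b[{color}m ... \u001b[0m", mirroring the
# colour-writing helper defined earlier in this module.
def _demo_color(content: str, color: int) -> None:
    sys.stdout.write(f"\u001b[{color}m{content}\u001b[0m\n")

# e.g. _demo_color("ok", 32) prints "ok" in green on ANSI-capable terminals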
| 32
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] ) -> Dict:
'''simple docstring'''
if openai_config_file == "":
lowercase = OpenAIGPTConfig()
else:
lowercase = OpenAIGPTConfig.from_json_file(lowerCAmelCase__ )
lowercase = OpenAIGPTModel(lowerCAmelCase__ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
lowercase = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowercase = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() , lowerCAmelCase__ )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
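# Editorial aside (hedged, hypothetical helper): os.path.join is the portable
# spelling of the "+ '/' +" concatenation used above for the two dump files.
import os
def _dump_paths(folder: str):
    return os.path.join(folder, WEIGHTS_NAME), os.path.join(folder, CONFIG_NAME)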
if __name__ == "__main__":
__lowerCAmelCase : List[str] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--openai_checkpoint_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the TensorFlow checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--openai_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
__lowerCAmelCase : str =parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""only integers accepted as input""" )
else:
lowercase = str(abs(lowerCAmelCase__ ) )
lowercase = [list(lowerCAmelCase__ ) for char in range(len(lowerCAmelCase__ ) )]
for index in range(len(lowerCAmelCase__ ) ):
num_transpositions[index].pop(lowerCAmelCase__ )
return max(
int("""""".join(list(lowerCAmelCase__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 32
| 1
|
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
# General docstring
__lowerCAmelCase : Optional[int] ="""ResNetConfig"""
# Base docstring
__lowerCAmelCase : Dict ="""microsoft/resnet-50"""
__lowerCAmelCase : int =[1, 2_0_4_8, 7, 7]
# Image classification docstring
__lowerCAmelCase : List[Any] ="""microsoft/resnet-50"""
__lowerCAmelCase : Any ="""tiger cat"""
__lowerCAmelCase : List[Any] =[
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class _A ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 3 , __lowerCAmelCase = 1 , __lowerCAmelCase = "relu" ):
"""simple docstring"""
super().__init__()
lowercase = nn.Convad(
__lowerCAmelCase , __lowerCAmelCase , kernel_size=__lowerCAmelCase , stride=__lowerCAmelCase , padding=kernel_size // 2 , bias=__lowerCAmelCase )
lowercase = nn.BatchNormad(__lowerCAmelCase )
lowercase = ACTaFN[activation] if activation is not None else nn.Identity()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.convolution(__lowerCAmelCase )
lowercase = self.normalization(__lowerCAmelCase )
lowercase = self.activation(__lowerCAmelCase )
return hidden_state
class _A ( nn.Module ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__()
lowercase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
lowercase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
lowercase = config.num_channels
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase = self.embedder(__lowerCAmelCase )
lowercase = self.pooler(__lowerCAmelCase )
return embedding
class _A ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 2 ):
"""simple docstring"""
super().__init__()
lowercase = nn.Convad(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , stride=__lowerCAmelCase , bias=__lowerCAmelCase )
lowercase = nn.BatchNormad(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.convolution(__lowerCAmelCase )
lowercase = self.normalization(__lowerCAmelCase )
return hidden_state
class _A ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = "relu" ):
"""simple docstring"""
super().__init__()
lowercase = in_channels != out_channels or stride != 1
lowercase = (
ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
lowercase = nn.Sequential(
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , activation=__lowerCAmelCase ) , )
lowercase = ACTaFN[activation]
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = hidden_state
lowercase = self.layer(__lowerCAmelCase )
lowercase = self.shortcut(__lowerCAmelCase )
hidden_state += residual
lowercase = self.activation(__lowerCAmelCase )
return hidden_state
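# Editorial sketch (hypothetical function and sizes) of the residual pattern the
# basic and bottleneck layers implement: y = activation(F(x) + shortcut(x)),
# with an identity shortcut whenever the shapes already match.
def _residual_sketch() -> None:
    x = torch.randn(1, 8, 4, 4)
    y = nn.functional.relu(nn.Conv2d(8, 8, 3, padding=1)(x) + x)  # identity shortcut
    assert y.shape == x.shape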
class _A ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 4 ):
"""simple docstring"""
super().__init__()
lowercase = in_channels != out_channels or stride != 1
lowercase = out_channels // reduction
lowercase = (
ResNetShortCut(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) if should_apply_shortcut else nn.Identity()
)
lowercase = nn.Sequential(
ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase ) , ResNetConvLayer(__lowerCAmelCase , __lowerCAmelCase , kernel_size=1 , activation=__lowerCAmelCase ) , )
lowercase = ACTaFN[activation]
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = hidden_state
lowercase = self.layer(__lowerCAmelCase )
lowercase = self.shortcut(__lowerCAmelCase )
hidden_state += residual
lowercase = self.activation(__lowerCAmelCase )
return hidden_state
class _A ( nn.Module ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 2 , __lowerCAmelCase = 2 , ):
"""simple docstring"""
super().__init__()
lowercase = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
lowercase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(__lowerCAmelCase , __lowerCAmelCase , stride=__lowerCAmelCase , activation=config.hidden_act ) , *[layer(__lowerCAmelCase , __lowerCAmelCase , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = input
for layer in self.layers:
lowercase = layer(__lowerCAmelCase )
return hidden_state
class _A ( nn.Module ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__()
lowercase = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
__lowerCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(__lowerCAmelCase , config.depths[1:] ):
self.stages.append(ResNetStage(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , depth=__lowerCAmelCase ) )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = True ):
"""simple docstring"""
lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase = hidden_states + (hidden_state,)
lowercase = stage_module(__lowerCAmelCase )
if output_hidden_states:
lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=__lowerCAmelCase , hidden_states=__lowerCAmelCase , )
class _A ( lowerCAmelCase ):
snake_case__ : Dict = ResNetConfig
snake_case__ : Optional[int] = 'resnet'
snake_case__ : List[Any] = 'pixel_values'
snake_case__ : Any = True
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(__lowerCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=False ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = value
__lowerCAmelCase : List[str] =R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
__lowerCAmelCase : Optional[Any] =R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , lowerCAmelCase , )
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = config
lowercase = ResNetEmbeddings(__lowerCAmelCase )
lowercase = ResNetEncoder(__lowerCAmelCase )
lowercase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.embedder(__lowerCAmelCase )
lowercase = self.encoder(
__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
lowercase = encoder_outputs[0]
lowercase = self.pooler(__lowerCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__lowerCAmelCase , pooler_output=__lowerCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , lowerCAmelCase , )
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = config.num_labels
lowercase = ResNetModel(__lowerCAmelCase )
# classification head
lowercase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def A__ ( self , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
"""simple docstring"""
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.resnet(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
lowercase = outputs.pooler_output if return_dict else outputs[1]
lowercase = self.classifier(__lowerCAmelCase )
lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase = """single_label_classification"""
else:
lowercase = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase = MSELoss()
if self.num_labels == 1:
lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
elif self.config.problem_type == "single_label_classification":
lowercase = CrossEntropyLoss()
lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase = BCEWithLogitsLoss()
lowercase = loss_fct(__lowerCAmelCase , __lowerCAmelCase )
if not return_dict:
lowercase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__lowerCAmelCase , logits=__lowerCAmelCase , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , lowerCAmelCase , )
class _A ( lowerCAmelCase , lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
super()._init_backbone(__lowerCAmelCase )
lowercase = [config.embedding_size] + config.hidden_sizes
lowercase = ResNetEmbeddings(__lowerCAmelCase )
lowercase = ResNetEncoder(__lowerCAmelCase )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__lowerCAmelCase )
@replace_return_docstrings(output_type=__lowerCAmelCase , config_class=_CONFIG_FOR_DOC )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None ):
"""simple docstring"""
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = self.embedder(__lowerCAmelCase )
lowercase = self.encoder(__lowerCAmelCase , output_hidden_states=__lowerCAmelCase , return_dict=__lowerCAmelCase )
lowercase = outputs.hidden_states
lowercase = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
lowercase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=__lowerCAmelCase , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=__lowerCAmelCase , )
| 32
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray:
'''simple docstring'''
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
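# Worked numeric check of the rotation above (assuming degrees in and the standard
# counter-clockwise convention): rotating (1, 0) by 90 degrees lands on (0, 1).
_theta = numpy.radians(90)
_c, _s = numpy.cos(_theta), numpy.sin(_theta)
assert numpy.allclose(numpy.dot(numpy.array(((_c, -_s), (_s, _c))), numpy.array([1, 0])), [0, 1])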
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :bool , lowerCAmelCase__ :list[int] , lowerCAmelCase__ :float ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("""Depth cannot be less than 0""" )
if len(lowerCAmelCase__ ) == 0:
raise ValueError("""Scores cannot be empty""" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
return min(
minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
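# Hedged worked example for the minimax above: with leaf scores [3, 5, 2, 9] and
# height log2(4) = 2, the maximising root evaluates max(min(3, 5), min(2, 9)) = 3.
assert max(min(3, 5), min(2, 9)) == 3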
def UpperCAmelCase__ ( ) -> None:
'''simple docstring'''
lowercase = [9_0, 2_3, 6, 3_3, 2_1, 6_5, 1_2_3, 3_4_4_2_3]
lowercase = math.log(len(lowerCAmelCase__ ) , 2 )
print("""Optimal value : """ , end="""""" )
print(minimax(0 , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = credit_card_number
lowercase = 0
lowercase = len(lowerCAmelCase__ ) - 2
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
lowercase = int(cc_number[i] )
digit *= 2
# If doubling the digit results in a two-digit number,
# i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
# to get a single-digit number.
if digit > 9:
digit %= 1_0
digit += 1
lowercase = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
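# Self-contained Luhn sketch (hypothetical helper name) mirroring the logic above:
# double every second digit from the right, digit-sum any two-digit result, and
# require the grand total to be divisible by 10.
def _luhn_ok(number: str) -> bool:
    digits = [int(d) for d in number][::-1]
    total = sum(digits[0::2]) + sum(sum(divmod(2 * d, 10)) for d in digits[1::2])
    return total % 10 == 0

assert _luhn_ok("4111111111111111")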
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(lowerCAmelCase__ ) <= 1_6:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 32
| 1
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[str] =[
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
__lowerCAmelCase : Union[str, Any] =[
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )
return sd
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple=rename_keys_prefix ) -> List[str]:
'''simple docstring'''
lowercase = OrderedDict()
lowercase = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowercase = key
for name_pair in rename_keys_prefix:
lowercase = new_key.replace(name_pair[0] , name_pair[1] )
lowercase = d[key]
if key == "bert.cls.predictions.decoder.weight":
# The old BERT code didn't have `decoder.bias`; it was added separately
lowercase = new_d["""cls.predictions.bias"""]
return new_d
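# Minimal illustration of the prefix renaming performed above (hypothetical
# single-pair mapping): every "bert.bert" prefix becomes "visual_bert".
_renamed = {k.replace("bert.bert", "visual_bert"): v for k, v in {"bert.bert.embeddings.weight": 0}.items()}
assert _renamed == {"visual_bert.embeddings.weight": 0}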
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
assert (
checkpoint_path.split("""/""" )[-1] in ACCEPTABLE_CHECKPOINTS
), f'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
lowercase = """pretraining"""
if "vcr" in checkpoint_path:
lowercase = {"""visual_embedding_dim""": 5_1_2}
elif "vqa_advanced" in checkpoint_path:
lowercase = {"""visual_embedding_dim""": 2_0_4_8}
elif "vqa" in checkpoint_path:
lowercase = {"""visual_embedding_dim""": 2_0_4_8}
elif "nlvr" in checkpoint_path:
lowercase = {"""visual_embedding_dim""": 1_0_2_4}
else:
raise NotImplementedError(f'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
lowercase = {"""visual_embedding_dim""": 5_1_2}
lowercase = """multichoice"""
elif "vqa_advanced" in checkpoint_path:
lowercase = {"""visual_embedding_dim""": 2_0_4_8}
lowercase = """vqa_advanced"""
elif "vqa" in checkpoint_path:
lowercase = {"""visual_embedding_dim""": 2_0_4_8, """num_labels""": 3_1_2_9}
lowercase = """vqa"""
elif "nlvr" in checkpoint_path:
lowercase = {
"""visual_embedding_dim""": 1_0_2_4,
"""num_labels""": 2,
}
lowercase = """nlvr"""
lowercase = VisualBertConfig(**lowerCAmelCase__ )
# Load State Dict
lowercase = load_state_dict(lowerCAmelCase__ )
lowercase = get_new_dict(lowerCAmelCase__ , lowerCAmelCase__ )
if model_type == "pretraining":
lowercase = VisualBertForPreTraining(lowerCAmelCase__ )
elif model_type == "vqa":
lowercase = VisualBertForQuestionAnswering(lowerCAmelCase__ )
elif model_type == "nlvr":
lowercase = VisualBertForVisualReasoning(lowerCAmelCase__ )
elif model_type == "multichoice":
lowercase = VisualBertForMultipleChoice(lowerCAmelCase__ )
model.load_state_dict(lowerCAmelCase__ )
# Save Checkpoints
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument("""orig_checkpoint_path""", type=str, help="""A path to .th on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", type=str, help="""Path to the output PyTorch model.""")
__lowerCAmelCase : Dict =parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 32
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
def extract(*__lowerCAmelCase , **__lowerCAmelCase ):
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = torch.ones([0] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# assemble the pipeline (note: this test uses the DDIM scheduler configured above)
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(pipe.scheduler , __lowerCAmelCase )
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowercase = 40_0366_0346
lowercase = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowercase = 27_3497_1755
lowercase = 7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowercase = 10_4435_5234
lowercase = 12
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 32
| 1
|
"""simple docstring"""
import collections
import importlib.util
import os
import re
from pathlib import Path
__lowerCAmelCase : List[str] ="""src/transformers"""
# Matches is_xxx_available()
__lowerCAmelCase : Optional[int] =re.compile(R"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
__lowerCAmelCase : Any =re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
__lowerCAmelCase : str =re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
__lowerCAmelCase : str =re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
__lowerCAmelCase : Tuple =re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
__lowerCAmelCase : Any =re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
__lowerCAmelCase : Optional[Any] =re.compile(R"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
__lowerCAmelCase : Optional[Any] =re.compile(R"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
__lowerCAmelCase : Tuple =re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
__lowerCAmelCase : Optional[int] =re.compile(R"""^\s*try:""")
# Catches a line with else:
__lowerCAmelCase : List[str] =re.compile(R"""^\s*else:""")
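# Quick hedged check of the availability regex above: it captures the backend name
# from a guard line such as "if not is_torch_available():".
_example_match = re.compile(R"is\_([a-z_]*)_available").search("if not is_torch_available():")
assert _example_match is not None and _example_match.groups()[0] == "torch"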
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict ) -> int:
'''simple docstring'''
if _re_test_backend.search(lowerCAmelCase__ ) is None:
return None
lowercase = [b[0] for b in _re_backend.findall(lowerCAmelCase__ )]
backends.sort()
return "_and_".join(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple ) -> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase = f.readlines()
lowercase = 0
while line_index < len(lowerCAmelCase__ ) and not lines[line_index].startswith("""_import_structure = {""" ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lowerCAmelCase__ ):
return None
# First grab the objects without a specific backend in _import_structure
lowercase = []
while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None:
lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(lowerCAmelCase__ ):
lowercase = _re_one_line_import_struct.search(lowerCAmelCase__ ).groups()[0]
lowercase = re.findall(R"""\[([^\]]+)\]""" , lowerCAmelCase__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(""", """ )] )
line_index += 1
continue
lowercase = _re_import_struct_key_value.search(lowerCAmelCase__ )
if single_line_import_search is not None:
lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
line_index += 1
lowercase = {"""none""": objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith("""if TYPE_CHECKING""" ):
# If the line is an if not is_backend_available, we grab all objects associated.
lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ):
lowercase = lines[line_index]
if _re_import_struct_add_one.search(lowerCAmelCase__ ) is not None:
objects.append(_re_import_struct_add_one.search(lowerCAmelCase__ ).groups()[0] )
elif _re_import_struct_add_many.search(lowerCAmelCase__ ) is not None:
lowercase = _re_import_struct_add_many.search(lowerCAmelCase__ ).groups()[0].split(""", """ )
lowercase = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_between_brackets.search(lowerCAmelCase__ ) is not None:
lowercase = _re_between_brackets.search(lowerCAmelCase__ ).groups()[0].split(""", """ )
lowercase = [obj[1:-1] for obj in imports if len(lowerCAmelCase__ ) > 0]
objects.extend(lowerCAmelCase__ )
elif _re_quote_object.search(lowerCAmelCase__ ) is not None:
objects.append(_re_quote_object.search(lowerCAmelCase__ ).groups()[0] )
elif line.startswith(""" """ * 8 + """\"""" ):
objects.append(line[9:-3] )
elif line.startswith(""" """ * 1_2 + """\"""" ):
objects.append(line[1_3:-3] )
line_index += 1
lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
lowercase = []
while (
line_index < len(lowerCAmelCase__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith("""else""" )
):
lowercase = lines[line_index]
lowercase = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 8 ):
objects.append(line[8:-2] )
line_index += 1
lowercase = {"""none""": objects}
# Let's continue with backend-specific objects
while line_index < len(lowerCAmelCase__ ):
# If the line is an if is_backend_available, we grab all objects associated.
lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ):
lowercase = lines[line_index]
lowercase = _re_import.search(lowerCAmelCase__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """ ) )
elif line.startswith(""" """ * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
def find_duplicates(lowerCAmelCase__ :int ):
return [k for k, v in collections.Counter(lowerCAmelCase__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
lowercase = []
for key in import_dict_objects.keys():
lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'Duplicate _import_structure definitions for: {duplicate_imports}' )
lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
lowercase = """base imports""" if key == """none""" else f'{key} backend'
errors.append(f'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f' {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f' {a} in _import_structure but not in TYPE_HINT.' )
return errors
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = []
for root, _, files in os.walk(lowerCAmelCase__ ):
if "__init__.py" in files:
lowercase = os.path.join(lowerCAmelCase__ , """__init__.py""" )
lowercase = parse_init(lowerCAmelCase__ )
if objects is not None:
lowercase = analyze_results(*lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowercase = f'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append("""\n""".join(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) > 0:
raise ValueError("""\n\n""".join(lowerCAmelCase__ ) )
def UpperCAmelCase__ ( ) -> Any:
'''simple docstring'''
lowercase = []
for path, directories, files in os.walk(lowerCAmelCase__ ):
for folder in directories:
# Ignore private modules
if folder.startswith("""_""" ):
directories.remove(lowerCAmelCase__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(lowerCAmelCase__ ) / folder).glob("""*.py""" ) ) ) == 0:
continue
lowercase = str((Path(lowerCAmelCase__ ) / folder).relative_to(lowerCAmelCase__ ) )
lowercase = short_path.replace(os.path.sep , """.""" )
submodules.append(lowerCAmelCase__ )
for fname in files:
if fname == "__init__.py":
continue
lowercase = str((Path(lowerCAmelCase__ ) / fname).relative_to(lowerCAmelCase__ ) )
lowercase = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" )
if len(submodule.split(""".""" ) ) == 1:
submodules.append(lowerCAmelCase__ )
return submodules
__lowerCAmelCase : str =[
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def UpperCAmelCase__ ( ) -> List[Any]:
'''simple docstring'''
lowercase = importlib.util.spec_from_file_location(
"""transformers""" , os.path.join(lowerCAmelCase__ , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
lowercase = spec.loader.load_module()
lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(lowerCAmelCase__ ) > 0:
lowercase = """\n""".join(f'- {module}' for module in module_not_registered )
raise ValueError(
"""The following submodules are not properly registered in the main init of Transformers:\n"""
f'{list_of_modules}\n'
"""Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" )
if __name__ == "__main__":
check_all_inits()
check_submodules()
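# Typical invocation, run from the repository root (the script path is illustrative):
#   python utils/check_inits.py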
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
        # If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
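# Worked example (hand-checked, independent of the solver above): the system
#     2x + 3y = 12
#     x  +  y = 5
# given row-wise as [[2, 3, 12], [1, 1, 5]] eliminates x to get y = 2, then
# back-substitutes to x = 3, so the solver should return [3.0, 2.0].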
"""simple docstring"""
import json
import sys
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> Optional[Any]:
'''simple docstring'''
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as f:
lowercase = json.load(lowerCAmelCase__ )
lowercase = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(lowerCAmelCase__ ):
lowercase = results[benchmark_name]
lowercase = benchmark_name.split("""/""" )[-1]
output_md.append(f'### Benchmark: {benchmark_file_name}' )
lowercase = """| metric |"""
lowercase = """|--------|"""
lowercase = """| new / old (diff) |"""
for metric_name in sorted(lowerCAmelCase__ ):
lowercase = benchmark_res[metric_name]
lowercase = metric_vals["""new"""]
lowercase = metric_vals.get("""old""" , lowerCAmelCase__ )
lowercase = metric_vals.get("""diff""" , lowerCAmelCase__ )
lowercase = f' {new_val:f}' if isinstance(lowerCAmelCase__ , (int, float) ) else """None"""
if old_val is not None:
val_str += f' / {old_val:f}' if isinstance(lowerCAmelCase__ , (int, float) ) else "None"
if dif_val is not None:
val_str += f' ({dif_val:f})' if isinstance(lowerCAmelCase__ , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(lowerCAmelCase__ ) )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] =sys.argv[1]
__lowerCAmelCase : List[Any] =sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
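# Input/output sketch (values are illustrative): given a results JSON such as
#   {"benchmarks/read.py": {"read_time_s": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
# the script writes a collapsible <details> block containing, per benchmark file,
# a small Markdown table with one "new / old (diff)" column per metric.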
"""simple docstring"""
import math
import unittest
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def A__ ( self ):
"""simple docstring"""
with self.assertRaises(__lowerCAmelCase ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
self.assertFalse(
is_prime(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
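if __name__ == "__main__":
    # Quick self-contained check of the "6k +/- 1" fact the trial-division loop
    # above relies on: every prime greater than 3 is congruent to 1 or 5 mod 6.
    assert all(p % 6 in (1, 5) for p in [5, 7, 11, 13, 17, 19, 23, 29, 31, 37])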
if __name__ == "__main__":
unittest.main()
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
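# Migration sketch (the checkpoint name is illustrative and assumes a diffusers
# release that ships the pipeline under this name):
#   from diffusers import StableDiffusionImg2ImgPipeline
#   pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")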
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> int:
'''simple docstring'''
lowercase = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
lowercase = 1_2_8
elif "12-12" in model_name:
lowercase = 1_2
lowercase = 1_2
elif "14-14" in model_name:
lowercase = 1_4
lowercase = 1_4
elif "16-16" in model_name:
lowercase = 1_6
lowercase = 1_6
else:
raise ValueError("""Model not supported""" )
lowercase = """huggingface/label-files"""
if "speech-commands" in model_name:
lowercase = 3_5
lowercase = """speech-commands-v2-id2label.json"""
else:
lowercase = 5_2_7
lowercase = """audioset-id2label.json"""
lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
return config
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict ) -> Tuple:
'''simple docstring'''
if "module.v" in name:
lowercase = name.replace("""module.v""" , """audio_spectrogram_transformer""" )
if "cls_token" in name:
lowercase = name.replace("""cls_token""" , """embeddings.cls_token""" )
if "dist_token" in name:
lowercase = name.replace("""dist_token""" , """embeddings.distillation_token""" )
if "pos_embed" in name:
lowercase = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
# transformer blocks
if "blocks" in name:
lowercase = name.replace("""blocks""" , """encoder.layer""" )
if "attn.proj" in name:
lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
lowercase = name.replace("""audio_spectrogram_transformer.norm""" , """audio_spectrogram_transformer.layernorm""" )
# classifier head
if "module.mlp_head.0" in name:
lowercase = name.replace("""module.mlp_head.0""" , """classifier.layernorm""" )
if "module.mlp_head.1" in name:
lowercase = name.replace("""module.mlp_head.1""" , """classifier.dense""" )
return name
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> Dict:
'''simple docstring'''
lowercase = [
"""module.v.head.weight""",
"""module.v.head.bias""",
"""module.v.head_dist.weight""",
"""module.v.head_dist.bias""",
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase__ , lowerCAmelCase__ )
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Any=False ) -> Optional[int]:
'''simple docstring'''
lowercase = get_audio_spectrogram_transformer_config(lowerCAmelCase__ )
lowercase = {
"""ast-finetuned-audioset-10-10-0.4593""": (
"""https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.450""": (
"""https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448""": (
"""https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"""
),
"""ast-finetuned-audioset-10-10-0.448-v2""": (
"""https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"""
),
"""ast-finetuned-audioset-12-12-0.447""": (
"""https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"""
),
"""ast-finetuned-audioset-14-14-0.443""": (
"""https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"""
),
"""ast-finetuned-audioset-16-16-0.442""": (
"""https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"""
),
"""ast-finetuned-speech-commands-v2""": (
"""https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"""
),
}
# load original state_dict
lowercase = model_name_to_url[model_name]
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" )
# remove some keys
remove_keys(lowerCAmelCase__ )
# rename some keys
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
# load 🤗 model
lowercase = ASTForAudioClassification(lowerCAmelCase__ )
model.eval()
model.load_state_dict(lowerCAmelCase__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
lowercase = -4.2_677_393 if """speech-commands""" not in model_name else -6.845_978
lowercase = 4.5_689_974 if """speech-commands""" not in model_name else 5.5_654_526
lowercase = 1_0_2_4 if """speech-commands""" not in model_name else 1_2_8
lowercase = ASTFeatureExtractor(mean=lowerCAmelCase__ , std=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
if "speech-commands" in model_name:
lowercase = load_dataset("""speech_commands""" , """v0.02""" , split="""validation""" )
lowercase = dataset[0]["""audio"""]["""array"""]
else:
lowercase = hf_hub_download(
repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" , )
lowercase , lowercase = torchaudio.load(lowerCAmelCase__ )
lowercase = waveform.squeeze().numpy()
lowercase = feature_extractor(lowerCAmelCase__ , sampling_rate=1_6_0_0_0 , return_tensors="""pt""" )
# forward pass
lowercase = model(**lowerCAmelCase__ )
lowercase = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
lowercase = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
lowercase = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
lowercase = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
lowercase = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
lowercase = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
lowercase = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
lowercase = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
lowercase = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("""Unknown model name""" )
if not torch.allclose(logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ):
raise ValueError("""Logits don't match""" )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
print("""Pushing model and feature extractor to the hub...""" )
model.push_to_hub(f'MIT/{model_name}' )
feature_extractor.push_to_hub(f'MIT/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowerCAmelCase : str =parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
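# Example invocation (script name and output folder are illustrative):
#   python convert_audio_spectrogram_transformer_checkpoint.py \
#       --model_name ast-finetuned-audioset-10-10-0.4593 \
#       --pytorch_dump_folder_path ./ast-converted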
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = GPTSanJapaneseTokenizer
snake_case__ : int = False
snake_case__ : Tuple = {'do_clean_text': False, 'add_prefix_space': False}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowercase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowercase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.get_input_output_texts(__lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__lowerCAmelCase : List[str] =TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
__lowerCAmelCase : int =[]
__lowerCAmelCase : Union[str, Any] =[]
__lowerCAmelCase : str ={"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
__lowerCAmelCase : Optional[Any] =[
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
"""emoji""": True,
},
}
]
__lowerCAmelCase : Tuple =0
for log in Path().glob("""*.log"""):
__lowerCAmelCase : str =0
with open(log, """r""") as f:
for line in f:
__lowerCAmelCase : Any =json.loads(line)
if line.get("""nodeid""", """""") != "":
__lowerCAmelCase : List[str] =line["""nodeid"""]
if line.get("""duration""", None) is not None:
__lowerCAmelCase : Optional[int] =F"""{line['duration']:.4f}"""
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__lowerCAmelCase : List[str] =[]
log.unlink()
__lowerCAmelCase : Union[str, Any] =""""""
__lowerCAmelCase : Tuple =[]
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
__lowerCAmelCase : Optional[Any] =[]
__lowerCAmelCase : List[Any] ={}
for test in failed_tests:
__lowerCAmelCase : Dict =test[0].split("""::""")
__lowerCAmelCase : int =data[0].split("""/""")[-1]
if data[0] not in filesafailed:
__lowerCAmelCase : Union[str, Any] =[data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__lowerCAmelCase : List[str] =[test[0] for test in failed_table]
__lowerCAmelCase : Dict =list(set(files))
# Count number of instances in failed_tests
__lowerCAmelCase : Optional[int] =[]
for file in individual_files:
table.append([file, len(filesafailed[file])])
__lowerCAmelCase : int =tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
__lowerCAmelCase : int ="""Too many failed tests, please see the full report in the Action results."""
__lowerCAmelCase : List[str] =len(err) + 1_0
__lowerCAmelCase : Any =message[: 3_0_0_0 - offset] + F"""\n...\n```\n{err}"""
print(F"""### {message}""")
else:
__lowerCAmelCase : int ="""No failed tests! 🤗"""
print(F"""## {message}""")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
__lowerCAmelCase : Dict =WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
__lowerCAmelCase : int ={
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
__lowerCAmelCase : Tuple ={
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
__lowerCAmelCase : Union[str, Any] ={
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
__lowerCAmelCase : str =client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
__lowerCAmelCase : Tuple =response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__lowerCAmelCase : str =""""""
for i, row in enumerate(test_failures):
if row[0] != test_class:
__lowerCAmelCase : List[str] =row[0]
else:
__lowerCAmelCase : Optional[Any] =""""""
__lowerCAmelCase : Any ={
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
)
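# Rendering note (approximate): the custom TableFormat above emits plain
# pipe-separated rows with no horizontal rules, which keeps the tables compact
# inside the triple-backtick code blocks posted to Slack.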
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] ={"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ViTFeatureExtractor"""]
__lowerCAmelCase : List[str] =["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
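# Minimal self-contained sketch of the lazy-import pattern used above (an
# assumption-level simplification, not the actual _LazyModule implementation):
if __name__ == "__main__":
    import importlib

    class _DemoLazyModule:
        """Defers the real import until an attribute is first accessed."""

        def __init__(self, target):
            self._target = target
            self._module = None

        def __getattr__(self, name):
            if self._module is None:
                self._module = importlib.import_module(self._target)
            return getattr(self._module, name)

    lazy_json = _DemoLazyModule("json")
    print(lazy_json.dumps({"lazy": True}))  # "json" is imported only on this call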
"""simple docstring"""
import math
import sys
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = """"""
try:
with open(lowerCAmelCase__ , """rb""" ) as binary_file:
lowercase = binary_file.read()
for dat in data:
lowercase = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = {"""0""": """0""", """1""": """1"""}
lowercase , lowercase = """""", """"""
lowercase = len(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase = lexicon[curr_string]
result += last_match_id
lowercase = last_match_id + """0"""
if math.loga(lowerCAmelCase__ ).is_integer():
lowercase = {}
for curr_key in list(lowerCAmelCase__ ):
lowercase = lexicon.pop(lowerCAmelCase__ )
lowercase = new_lex
lowercase = last_match_id + """1"""
index += 1
lowercase = """"""
return result
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> None:
'''simple docstring'''
lowercase = 8
try:
with open(lowerCAmelCase__ , """wb""" ) as opened_file:
lowercase = [
to_write[i : i + byte_length]
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(lowerCAmelCase__ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
lowercase = data_bits[counter:]
lowercase = data_bits[counter + 1 :]
return data_bits
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> None:
'''simple docstring'''
lowercase = read_file_binary(lowerCAmelCase__ )
lowercase = remove_prefix(lowerCAmelCase__ )
lowercase = decompress_data(lowerCAmelCase__ )
write_file_binary(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
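# Example invocation (file names are illustrative): decode a bit stream produced
# by the matching LZW-style compressor back into the original bytes:
#   python decompress.py compressed.bin restored.txt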
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
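# Usage sketch (assumes an active SparkSession and a `datasets` release with
# Spark support; `spark_df` is an illustrative pyspark DataFrame):
#   from datasets import Dataset
#   ds = Dataset.from_spark(spark_df)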
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class _A ( lowerCAmelCase ):
snake_case__ : int = 'beit'
def __init__( self , __lowerCAmelCase=8192 , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=224 , __lowerCAmelCase=16 , __lowerCAmelCase=3 , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=True , __lowerCAmelCase=[3, 5, 7, 11] , __lowerCAmelCase=[1, 2, 3, 6] , __lowerCAmelCase=True , __lowerCAmelCase=0.4 , __lowerCAmelCase=256 , __lowerCAmelCase=1 , __lowerCAmelCase=False , __lowerCAmelCase=255 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = use_mask_token
lowercase = use_absolute_position_embeddings
lowercase = use_relative_position_bias
lowercase = use_shared_relative_position_bias
lowercase = layer_scale_init_value
lowercase = drop_path_rate
lowercase = use_mean_pooling
# decode head attributes (semantic segmentation)
lowercase = out_indices
lowercase = pool_scales
# auxiliary head attributes (semantic segmentation)
lowercase = use_auxiliary_head
lowercase = auxiliary_loss_weight
lowercase = auxiliary_channels
lowercase = auxiliary_num_convs
lowercase = auxiliary_concat_input
lowercase = semantic_loss_ignore_index
class _A ( lowerCAmelCase ):
snake_case__ : Dict = version.parse('1.11' )
@property
def A__ ( self ):
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def A__ ( self ):
"""simple docstring"""
return 1E-4
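# Usage sketch (downloads the config from the Hub; checkpoint name taken from
# the archive map above):
#   from transformers import BeitConfig
#   config = BeitConfig.from_pretrained("microsoft/beit-base-patch16-224-pt22k")
#   print(config.hidden_size)  # 768 for the base model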
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(lowerCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCAmelCase__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
__lowerCAmelCase : int ={
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'audio-spectrogram-transformer'
def __init__( self , __lowerCAmelCase=768 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=16 , __lowerCAmelCase=True , __lowerCAmelCase=10 , __lowerCAmelCase=10 , __lowerCAmelCase=1024 , __lowerCAmelCase=128 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = patch_size
lowercase = qkv_bias
lowercase = frequency_stride
lowercase = time_stride
lowercase = max_length
lowercase = num_mel_bins
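# Usage sketch (downloads the config from the Hub; checkpoint name taken from
# the archive map above):
#   from transformers import ASTConfig
#   config = ASTConfig.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")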
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
lowercase = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
lowercase = dct.pop(lowerCAmelCase__ )
lowercase = val
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
if "handwritten" in checkpoint_url:
lowercase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase__ )
lowercase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase = 1_0_2_4
lowercase = 4_0_9_6
lowercase = 2_4
lowercase = 1_6
lowercase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, with no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = False
lowercase = """relu"""
lowercase = 1_0_2_4
lowercase = True
lowercase = False
lowercase = False
# load HuggingFace model
lowercase = ViTModel(lowerCAmelCase__ , add_pooling_layer=lowerCAmelCase__ )
lowercase = TrOCRForCausalLM(lowerCAmelCase__ )
lowercase = VisionEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" , check_hash=lowerCAmelCase__ )["""model"""]
lowercase = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase = state_dict.pop(lowerCAmelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowercase = val
else:
lowercase = val
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image
lowercase = ViTImageProcessor(size=encoder_config.image_size )
lowercase = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowercase = TrOCRProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = processor(images=prepare_img(lowerCAmelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowercase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase = model(pixel_values=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
lowercase = outputs.logits
lowercase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
lowercase = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
lowercase = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase__ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__lowerCAmelCase : Dict =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
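# Example invocation (script name and output folder are illustrative):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten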
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class _A :
snake_case__ : int
snake_case__ : Node | None = None
snake_case__ : Node | None = None
def UpperCAmelCase__ ( ) -> Node | None:
'''simple docstring'''
lowercase = Node(1 )
lowercase = Node(2 )
lowercase = Node(3 )
lowercase = Node(4 )
lowercase = Node(5 )
return tree
def UpperCAmelCase__ ( lowerCAmelCase__ :Node | None ) -> list[int]:
'''simple docstring'''
return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def UpperCAmelCase__ ( lowerCAmelCase__ :Node | None ) -> list[int]:
'''simple docstring'''
return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def UpperCAmelCase__ ( lowerCAmelCase__ :Node | None ) -> list[int]:
'''simple docstring'''
return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def UpperCAmelCase__ ( lowerCAmelCase__ :Node | None ) -> int:
'''simple docstring'''
return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def UpperCAmelCase__ ( lowerCAmelCase__ :Node | None ) -> Sequence[Node | None]:
'''simple docstring'''
lowercase = []
if root is None:
return output
lowercase = deque([root] )
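    # standard BFS: pop a node, record its value, then enqueue its children left to right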
while process_queue:
lowercase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCAmelCase__ ( lowerCAmelCase__ :Node | None , lowerCAmelCase__ :int ) -> Sequence[Node | None]:
'''simple docstring'''
lowercase = []
def populate_output(lowerCAmelCase__ :Node | None , lowerCAmelCase__ :int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(lowerCAmelCase__ , lowerCAmelCase__ )
return output
def UpperCAmelCase__ ( lowerCAmelCase__ :Node | None , lowerCAmelCase__ :int ) -> Sequence[Node | None]:
'''simple docstring'''
lowercase = []
def populate_output(lowerCAmelCase__ :Node | None , lowerCAmelCase__ :int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(lowerCAmelCase__ , lowerCAmelCase__ )
return output
def UpperCAmelCase__ ( lowerCAmelCase__ :Node | None ) -> Sequence[Node | None] | list[Any]:
'''simple docstring'''
if root is None:
return []
lowercase = []
lowercase = 0
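    # the flag alternates per level: 0 emits the level left-to-right, 1 right-to-left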
lowercase = height(lowerCAmelCase__ )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(lowerCAmelCase__ , lowerCAmelCase__ ) )
lowercase = 1
else:
output.append(get_nodes_from_right_to_left(lowerCAmelCase__ , lowerCAmelCase__ ) )
lowercase = 0
return output
def UpperCAmelCase__ ( ) -> None: # Main function for testing.
'''simple docstring'''
lowercase = make_tree()
print(f'In-order Traversal: {inorder(lowerCAmelCase__ )}' )
print(f'Pre-order Traversal: {preorder(lowerCAmelCase__ )}' )
print(f'Post-order Traversal: {postorder(lowerCAmelCase__ )}' , """\n""" )
print(f'Height of Tree: {height(lowerCAmelCase__ )}' , """\n""" )
print("""Complete Level Order Traversal: """ )
print(level_order(lowerCAmelCase__ ) , """\n""" )
print("""Level-wise order Traversal: """ )
for level in range(1 , height(lowerCAmelCase__ ) + 1 ):
print(f'Level {level}:' , get_nodes_from_left_to_right(lowerCAmelCase__ , level=lowerCAmelCase__ ) )
print("""\nZigZag order Traversal: """ )
print(zigzag(lowerCAmelCase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
lowercase = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 32
| 1
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__lowerCAmelCase : Dict =logging.get_logger(__name__) # pylint: disable=invalid-name
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , )
def A__ ( self , __lowerCAmelCase = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
self.enable_attention_slicing(__lowerCAmelCase )
@torch.no_grad()
def __call__( self , __lowerCAmelCase , __lowerCAmelCase = 512 , __lowerCAmelCase = 512 , __lowerCAmelCase = 50 , __lowerCAmelCase = 7.5 , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = "pil" , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = 1 , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = 1
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = len(__lowerCAmelCase )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(__lowerCAmelCase )}.' )
# get prompt text embeddings
lowercase = self.tokenizer(
__lowerCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase , lowercase , lowercase = text_embeddings.shape
lowercase = text_embeddings.repeat(1 , __lowerCAmelCase , 1 )
lowercase = text_embeddings.view(bs_embed * num_images_per_prompt , __lowerCAmelCase , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
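        # e.g. guidance_scale = 7.5 extrapolates 7.5x along the direction from the
        # unconditional to the text-conditioned noise prediction (see the guidance step below)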
lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase = 42
if negative_prompt is None:
lowercase = [""""""]
elif type(__lowerCAmelCase ) is not type(__lowerCAmelCase ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCAmelCase )} !='
f' {type(__lowerCAmelCase )}.' )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = [negative_prompt]
elif batch_size != len(__lowerCAmelCase ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(__lowerCAmelCase )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
lowercase = negative_prompt
lowercase = text_input_ids.shape[-1]
lowercase = self.tokenizer(
__lowerCAmelCase , padding="""max_length""" , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors="""pt""" , )
lowercase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase = uncond_embeddings.shape[1]
lowercase = uncond_embeddings.repeat(__lowerCAmelCase , __lowerCAmelCase , 1 )
lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowerCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated on the target device
        # for 1-to-1 reproducibility of results with the CompVis implementation.
        # However, this currently doesn't work on `mps`.
lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase = torch.randn(
__lowerCAmelCase , generator=__lowerCAmelCase , device="""cpu""" , dtype=__lowerCAmelCase ).to(self.device )
lowercase = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device="""cpu""" , dtype=__lowerCAmelCase ).to(
self.device )
else:
lowercase = torch.randn(
__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase )
lowercase = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowercase = latents_reference.to(self.device )
lowercase = latents.to(self.device )
            # This is the key part of the pipeline: it tries to ensure that images
            # generated with the same seed but different sizes come out similar
lowercase = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase = 0 if dx < 0 else dx
lowercase = 0 if dy < 0 else dy
lowercase = max(-dx , 0 )
lowercase = max(-dy , 0 )
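            # (dx, dy) center-align the reference latents inside the target latent grid;
            # negative offsets instead crop the larger reference tensor symmetrically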
lowercase = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(__lowerCAmelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
lowercase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
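        # (init_noise_sigma is 1.0 for DDIM/PNDM; sigma-based schedulers such as
        # LMSDiscrete derive it from their largest noise sigma)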
lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase = {}
if accepts_eta:
lowercase = eta
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
lowercase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
# predict the noise residual
lowercase = self.unet(__lowerCAmelCase , __lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase = noise_pred.chunk(2 )
lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
lowercase = 1 / 0.1_8_2_1_5 * latents
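        # 0.18215 is the scaling factor of the Stable Diffusion VAE; dividing by it
        # maps the denoised latents back to the VAE's native scale before decoding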
lowercase = self.vae.decode(__lowerCAmelCase ).sample
lowercase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
lowercase = self.feature_extractor(self.numpy_to_pil(__lowerCAmelCase ) , return_tensors="""pt""" ).to(
self.device )
lowercase , lowercase = self.safety_checker(
images=__lowerCAmelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
lowercase = None
if output_type == "pil":
lowercase = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=__lowerCAmelCase , nsfw_content_detected=__lowerCAmelCase )
| 32
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : str = KandinskyInpaintPipeline
snake_case__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ : Optional[int] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ : Dict = False
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A__ ( self ):
"""simple docstring"""
return 100
@property
def A__ ( self ):
"""simple docstring"""
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(__lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def A__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
lowercase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
| 32
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = TextToVideoSDPipeline
snake_case__ : int = TEXT_TO_IMAGE_PARAMS
snake_case__ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
snake_case__ : Union[str, Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowercase = CLIPTextModel(__lowerCAmelCase )
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.get_dummy_components()
lowercase = TextToVideoSDPipeline(**__lowerCAmelCase )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = self.get_dummy_inputs(__lowerCAmelCase )
lowercase = """np"""
lowercase = sd_pipe(**__lowerCAmelCase ).frames
lowercase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowercase = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowerCAmelCase , expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def A__ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCAmelCase , expected_max_diff=1E-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def A__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
lowercase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
lowercase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase = pipe.to("""cuda""" )
lowercase = """Spiderman is surfing"""
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=25 , output_type="""pt""" ).frames
lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
lowercase = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
lowercase = pipe.to("""cuda""" )
lowercase = """Spiderman is surfing"""
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase = pipe(__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=2 , output_type="""pt""" ).frames
lowercase = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 32
|
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
| 32
| 1
|
"""simple docstring"""
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
__lowerCAmelCase : List[Any] =yaml.safe_load(
"""\
name: \"\"
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Dataset Card for X\" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: \"Table of Contents\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Dataset Description\"
allow_empty: false
allow_empty_text: false
subsections:
- name: \"Dataset Summary\"
allow_empty: false
allow_empty_text: false
subsections: null
- name: \"Supported Tasks and Leaderboards\"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
"""
)
__lowerCAmelCase : Union[str, Any] ={
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
__lowerCAmelCase : str ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Dict ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Optional[Any] ={
"""name""": """root""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{
"""name""": """Dataset Card for My Dataset""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [
{"""name""": """Table of Contents""", """text""": """Some text here.""", """is_empty_text""": False, """subsections""": []},
{
"""name""": """Dataset Description""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Dataset Summary""",
"""text""": """Some text here.""",
"""is_empty_text""": False,
"""subsections""": [
{
"""name""": """Extra Ignored Subsection""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
}
],
},
{
"""name""": """Supported Tasks and Leaderboards""",
"""text""": """""",
"""is_empty_text""": True,
"""subsections""": [],
},
{"""name""": """Languages""", """text""": """Language Text""", """is_empty_text""": False, """subsections""": []},
],
},
],
}
],
}
__lowerCAmelCase : List[str] ="""\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Optional[Any] =(
"""The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."""
)
__lowerCAmelCase : Optional[Any] ="""\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Dict =(
"""The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."""
)
__lowerCAmelCase : List[str] ="""\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Dict ="""The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."""
__lowerCAmelCase : str ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : str ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."""
__lowerCAmelCase : Tuple ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
"""
__lowerCAmelCase : Optional[Any] ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."""
__lowerCAmelCase : Optional[Any] ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
"""
__lowerCAmelCase : Optional[Any] ="""The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."""
__lowerCAmelCase : Optional[Any] ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
"""
__lowerCAmelCase : Union[str, Any] ="""The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."""
__lowerCAmelCase : Optional[Any] ="""\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Dict ="""The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."""
__lowerCAmelCase : List[Any] ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
"""
__lowerCAmelCase : List[str] ="""The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."""
__lowerCAmelCase : Optional[Any] ="""\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Dict ="""The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."""
__lowerCAmelCase : Tuple =""""""
__lowerCAmelCase : Dict ="""The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."""
__lowerCAmelCase : str ="""\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
"""
__lowerCAmelCase : Tuple ="""The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."""
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[int] ) -> int:
'''simple docstring'''
assert ReadMe.from_string(lowerCAmelCase__ , lowerCAmelCase__ ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Dict ) -> Union[str, Any]:
'''simple docstring'''
with pytest.raises(lowerCAmelCase__ , match=re.escape(expected_error.format(path="""root""" ) ) ):
lowercase = ReadMe.from_string(lowerCAmelCase__ , lowerCAmelCase__ )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int ) -> List[Any]:
'''simple docstring'''
with pytest.raises(lowerCAmelCase__ , match=re.escape(expected_error.format(path="""root""" ) ) ):
ReadMe.from_string(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict ) -> Optional[Any]:
'''simple docstring'''
ReadMe.from_string(lowerCAmelCase__ , lowerCAmelCase__ , suppress_parsing_errors=lowerCAmelCase__ )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = Path(lowerCAmelCase__ ) / """README.md"""
with open(lowerCAmelCase__ , """w+""" ) as readme_file:
readme_file.write(lowerCAmelCase__ )
lowercase = ReadMe.from_readme(lowerCAmelCase__ , lowerCAmelCase__ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict ) -> Optional[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = Path(lowerCAmelCase__ ) / """README.md"""
with open(lowerCAmelCase__ , """w+""" ) as readme_file:
readme_file.write(lowerCAmelCase__ )
lowercase = expected_error.format(path=lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ , match=re.escape(lowerCAmelCase__ ) ):
lowercase = ReadMe.from_readme(lowerCAmelCase__ , lowerCAmelCase__ )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = Path(lowerCAmelCase__ ) / """README.md"""
with open(lowerCAmelCase__ , """w+""" ) as readme_file:
readme_file.write(lowerCAmelCase__ )
lowercase = expected_error.format(path=lowerCAmelCase__ )
with pytest.raises(lowerCAmelCase__ , match=re.escape(lowerCAmelCase__ ) ):
ReadMe.from_readme(lowerCAmelCase__ , lowerCAmelCase__ )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
lowercase = Path(lowerCAmelCase__ ) / """README.md"""
with open(lowerCAmelCase__ , """w+""" ) as readme_file:
readme_file.write(lowerCAmelCase__ )
ReadMe.from_readme(lowerCAmelCase__ , lowerCAmelCase__ , suppress_parsing_errors=lowerCAmelCase__ )
| 32
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowercase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowercase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowercase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowercase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowercase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowercase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowercase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowercase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowercase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowercase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowercase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowercase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowercase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowercase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowercase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowercase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowercase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowercase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowercase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowercase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowercase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowercase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowercase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
            # weights and biases of the key, value and query projections of the vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
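            # the fused qkv weight stacks the three projections row-wise:
            # rows [:dim] are Q, [dim:2*dim] are K, and [-dim:] are V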
lowercase = key.split(""".""" )
lowercase , lowercase = int(key_split[2] ), int(key_split[4] )
lowercase = config.vision_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "in_proj" in key:
            # weights and biases of the key, value and query projections of the text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.text_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[
dim : dim * 2, :
]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowercase = val.squeeze_()
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int="groupvit-gcc-yfcc" , lowerCAmelCase__ :List[Any]=False ) -> str:
'''simple docstring'''
lowercase = GroupViTConfig()
lowercase = GroupViTModel(lowerCAmelCase__ ).eval()
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowercase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowercase = prepare_img()
lowercase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowercase = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print("""Successfully saved processor and model to""" , lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__lowerCAmelCase : int =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 32
| 1
|
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Dict:
'''simple docstring'''
lowercase = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowercase = 4
lowercase = 4_8
lowercase = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase = [6, 6, 6, 6]
lowercase = 6_0
lowercase = [6, 6, 6, 6]
lowercase = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase = 4
lowercase = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowercase = 1
lowercase = 1
lowercase = 1_2_6
lowercase = 7
lowercase = 255.0
lowercase = """"""
return config
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int ) -> List[str]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
lowercase = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
lowercase = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
lowercase = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
lowercase = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
lowercase = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
lowercase = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
lowercase = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
lowercase = """layernorm.weight"""
if name == "norm.bias":
lowercase = """layernorm.bias"""
if "conv_first" in name:
lowercase = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowercase = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowercase = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
lowercase = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
lowercase = name.replace("""upsample.2""" , """upsample.convolution_1""" )
lowercase = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
lowercase = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
lowercase = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
lowercase = """swin2sr.""" + name
return name
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[1] )
lowercase = int(key_split[4] )
lowercase = config.embed_dim
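            # same row-wise stacking as other fused qkv layers:
            # [:dim] -> query, [dim:2*dim] -> key, [-dim:] -> value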
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
lowercase = get_config(lowerCAmelCase__ )
lowercase = SwinaSRForImageSuperResolution(lowerCAmelCase__ )
model.eval()
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" )
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(lowerCAmelCase__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'Unexpected key {key} in state_dict' )
# verify values
lowercase = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
lowercase = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowercase = 1_2_6 if """Jpeg""" in checkpoint_url else 2_5_6
lowercase = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase = transforms(lowerCAmelCase__ ).unsqueeze(0 )
if config.num_channels == 1:
lowercase = pixel_values[:, 0, :, :].unsqueeze(1 )
lowercase = model(lowerCAmelCase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowercase = torch.Size([1, 3, 5_1_2, 5_1_2] )
lowercase = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowercase = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowercase = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowercase = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowercase = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowercase = torch.Size([1, 3, 5_1_2, 5_1_2] )
lowercase = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowercase = torch.Size([1, 3, 1_0_2_4, 1_0_2_4] )
lowercase = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCAmelCase__ , atol=1e-3 )
print("""Looks ok!""" )
lowercase = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
lowercase = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase__ )
if push_to_hub:
model.push_to_hub(f'caidas/{model_name}' )
processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
__lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
__lowerCAmelCase : List[str] =parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 32
|
"""simple docstring"""
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = None
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
        if isinstance(sources, int):
            lowercase = [sources]
        if isinstance(sinks, int):
            lowercase = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
lowercase = sources[0]
lowercase = sinks[0]
        # make a fake vertex if there is more
        # than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase = max_input_flow
lowercase = 0
lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase = max_input_flow
lowercase = size - 1
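    # Editorial note: with several sources, the code above prepends a
    # super-source whose edge to every real source has capacity max_input_flow,
    # an upper bound no feasible flow can exceed; multiple sinks symmetrically
    # get a super-sink appended at index size - 1.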
def A__ ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = algorithm(self )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = flow_network
lowercase = flow_network.verticesCount
lowercase = flow_network.sourceIndex
lowercase = flow_network.sinkIndex
        # it's just a reference, so you shouldn't change
        # it in your algorithms; use a deep copy before doing that
lowercase = flow_network.graph
lowercase = False
def A__ ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase = True
def A__ ( self ):
"""simple docstring"""
pass
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
# use this to save your result
lowercase = -1
def A__ ( self ):
"""simple docstring"""
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase = [0] * self.verticies_count
lowercase = [0] * self.verticies_count
def A__ ( self ):
"""simple docstring"""
lowercase = self.verticies_count
        # push some initial flow from the source into the graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase = 0
while i < len(__lowerCAmelCase ):
lowercase = vertices_list[i]
lowercase = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
lowercase = 0
else:
i += 1
lowercase = sum(self.preflow[self.source_index] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
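    # Editorial note: the amount pushed above is min(excess at from_index,
    # residual capacity of the edge); a push that drains the excess is
    # non-saturating, one that fills the edge is saturating.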
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase = self.heights[to_index]
if min_height is not None:
lowercase = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")
| 32
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Tuple =logging.get_logger(__name__)
__lowerCAmelCase : List[str] ={
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'realm'
def __init__( self , __lowerCAmelCase=3_0522 , __lowerCAmelCase=768 , __lowerCAmelCase=128 , __lowerCAmelCase=12 , __lowerCAmelCase=12 , __lowerCAmelCase=8 , __lowerCAmelCase=3072 , __lowerCAmelCase="gelu_new" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=512 , __lowerCAmelCase=2 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-12 , __lowerCAmelCase=256 , __lowerCAmelCase=10 , __lowerCAmelCase=1E-3 , __lowerCAmelCase=5 , __lowerCAmelCase=320 , __lowerCAmelCase=1335_3718 , __lowerCAmelCase=5000 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
# Common config
lowercase = vocab_size
lowercase = max_position_embeddings
lowercase = hidden_size
lowercase = retriever_proj_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = num_candidates
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = type_vocab_size
lowercase = layer_norm_eps
# Reader config
lowercase = span_hidden_size
lowercase = max_span_width
lowercase = reader_layer_norm_eps
lowercase = reader_beam_size
lowercase = reader_seq_len
# Retrieval config
lowercase = num_block_records
lowercase = searcher_beam_size
| 32
|
"""simple docstring"""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
__lowerCAmelCase : List[str] =logging.getLogger(__name__)
__lowerCAmelCase : Dict =tf.data.AUTOTUNE
def UpperCAmelCase__ ( ) -> List[str]:
'''simple docstring'''
lowercase = argparse.ArgumentParser(description="""Train a masked language model on TPU.""" )
parser.add_argument(
"""--pretrained_model_config""" , type=lowerCAmelCase__ , default="""roberta-base""" , help="""The model config to use. Note that we don't copy the model's weights, only the config!""" , )
parser.add_argument(
"""--tokenizer""" , type=lowerCAmelCase__ , default="""unigram-tokenizer-wikitext""" , help="""The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.""" , )
parser.add_argument(
"""--per_replica_batch_size""" , type=lowerCAmelCase__ , default=8 , help="""Batch size per TPU core.""" , )
parser.add_argument(
"""--no_tpu""" , action="""store_true""" , help="""If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.""" , )
parser.add_argument(
"""--tpu_name""" , type=lowerCAmelCase__ , help="""Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.""" , default="""local""" , )
parser.add_argument(
"""--tpu_zone""" , type=lowerCAmelCase__ , help="""Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.""" , )
parser.add_argument(
"""--gcp_project""" , type=lowerCAmelCase__ , help="""Google cloud project name. Only used for non-Colab TPU nodes.""" )
parser.add_argument(
"""--bfloat16""" , action="""store_true""" , help="""Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.""" , )
parser.add_argument(
"""--train_dataset""" , type=lowerCAmelCase__ , help="""Path to training dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--shuffle_buffer_size""" , type=lowerCAmelCase__ , default=2**1_8 , help="""Size of the shuffle buffer (in samples)""" , )
parser.add_argument(
"""--eval_dataset""" , type=lowerCAmelCase__ , help="""Path to evaluation dataset to load. If the path begins with `gs://`"""
""" then the dataset will be loaded from a Google Cloud Storage bucket.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase__ , default=1 , help="""Number of epochs to train for.""" , )
parser.add_argument(
"""--learning_rate""" , type=lowerCAmelCase__ , default=1e-4 , help="""Learning rate to use for training.""" , )
parser.add_argument(
"""--weight_decay_rate""" , type=lowerCAmelCase__ , default=1e-3 , help="""Weight decay rate to use for training.""" , )
parser.add_argument(
"""--max_length""" , type=lowerCAmelCase__ , default=5_1_2 , help="""Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py""" , )
parser.add_argument(
"""--mlm_probability""" , type=lowerCAmelCase__ , default=0.15 , help="""Fraction of tokens to mask during training.""" , )
parser.add_argument("""--output_dir""" , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="""Path to save model checkpoints to.""" )
parser.add_argument("""--hub_model_id""" , type=lowerCAmelCase__ , help="""Model ID to upload to on the Hugging Face Hub.""" )
lowercase = parser.parse_args()
return args
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
try:
if args.tpu_name:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
lowercase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"""Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or """
"""--gcp_project. When running on a TPU VM, use --tpu_name local.""" )
tf.config.experimental_connect_to_cluster(lowerCAmelCase__ )
tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase__ )
return tpu
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = 0
for file in file_list:
lowercase = file.split("""/""" )[-1]
lowercase = re.search(R"""-\d+-(\d+)\.tfrecord""" , lowerCAmelCase__ ).group(1 )
lowercase = int(lowerCAmelCase__ )
num_samples += sample_count
return num_samples
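# Editorial sketch (not from the original script): shard names are assumed to
# end in "-<shard_id>-<num_samples>.tfrecord", so the regex above extracts the
# trailing sample count. The file name below is hypothetical.
assert re.search(r"-\d+-(\d+)\.tfrecord", "wiki-00003-4096.tfrecord").group(1) == "4096"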
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any]=None ) -> List[Any]:
'''simple docstring'''
lowercase = count_samples(lowerCAmelCase__ )
lowercase = tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ )
if shuffle:
lowercase = dataset.shuffle(len(lowerCAmelCase__ ) )
lowercase = tf.data.TFRecordDataset(lowerCAmelCase__ , num_parallel_reads=lowerCAmelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
lowercase = dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
lowercase = dataset.shuffle(args.shuffle_buffer_size )
lowercase = dataset.batch(lowerCAmelCase__ , drop_remainder=lowerCAmelCase__ )
lowercase = dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
lowercase = dataset.prefetch(lowerCAmelCase__ )
return dataset
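# Editorial note: the pipeline above runs shard shuffle -> per-example decode ->
# sample shuffle -> batch -> per-batch masking -> prefetch, so the masking
# function is invoked once per batch rather than once per example.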
def UpperCAmelCase__ ( lowerCAmelCase__ :Any ) -> Optional[int]:
'''simple docstring'''
if not args.no_tpu:
lowercase = initialize_tpu(lowerCAmelCase__ )
lowercase = tf.distribute.TPUStrategy(lowerCAmelCase__ )
else:
lowercase = tf.distribute.OneDeviceStrategy(device="""/gpu:0""" )
    if args.bfloat16:
tf.keras.mixed_precision.set_global_policy("""mixed_bfloat16""" )
lowercase = AutoTokenizer.from_pretrained(args.tokenizer )
lowercase = AutoConfig.from_pretrained(args.pretrained_model_config )
lowercase = tokenizer.vocab_size
lowercase = tf.io.gfile.glob(os.path.join(args.train_dataset , """*.tfrecord""" ) )
if not training_records:
raise ValueError(f'No .tfrecord files found in {args.train_dataset}.' )
lowercase = tf.io.gfile.glob(os.path.join(args.eval_dataset , """*.tfrecord""" ) )
if not eval_records:
raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.' )
lowercase = count_samples(lowerCAmelCase__ )
lowercase = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
lowercase = steps_per_epoch * args.num_epochs
with strategy.scope():
lowercase = TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
lowercase , lowercase = create_optimizer(
num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 2_0 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase__ , metrics=["""accuracy"""] )
def decode_fn(lowerCAmelCase__ :Any ):
lowercase = {
"""input_ids""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
"""attention_mask""": tf.io.FixedLenFeature(dtype=tf.intaa , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
lowercase = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors="""tf""" )
def mask_with_collator(lowerCAmelCase__ :Dict ):
# TF really needs an isin() function
lowercase = (
~tf.cast(batch["""attention_mask"""] , tf.bool )
| (batch["""input_ids"""] == tokenizer.cls_token_id)
| (batch["""input_ids"""] == tokenizer.sep_token_id)
)
lowercase , lowercase = data_collator.tf_mask_tokens(
batch["""input_ids"""] , vocab_size=len(lowerCAmelCase__ ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=lowerCAmelCase__ , )
return batch
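    # Editorial note: the mask built above marks padding (the inverted attention
    # mask) plus CLS/SEP positions, so tf_mask_tokens only ever replaces
    # ordinary content tokens.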
lowercase = args.per_replica_batch_size * strategy.num_replicas_in_sync
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
lowercase = prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
lowercase = []
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
model.fit(
lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =parse_args()
main(args)
| 32
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase : Optional[int] ={
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCAmelCase : List[Any] ={
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
__lowerCAmelCase : str ={
"""configuration_audio_spectrogram_transformer""": [
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ASTConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =[
"""AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ASTForAudioClassification""",
"""ASTModel""",
"""ASTPreTrainedModel""",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[Any] =["""ASTFeatureExtractor"""]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
__lowerCAmelCase : str =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase : Tuple ={
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'mask2former'
snake_case__ : Union[str, Any] = ['swin']
snake_case__ : Any = {'hidden_size': 'hidden_dim'}
def __init__( self , __lowerCAmelCase = None , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 256 , __lowerCAmelCase = 1024 , __lowerCAmelCase = "relu" , __lowerCAmelCase = 6 , __lowerCAmelCase = 10 , __lowerCAmelCase = 8 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = 2048 , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 4 , __lowerCAmelCase = 255 , __lowerCAmelCase = 100 , __lowerCAmelCase = 0.1 , __lowerCAmelCase = 2.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 5.0 , __lowerCAmelCase = 1_2544 , __lowerCAmelCase = 3.0 , __lowerCAmelCase = 0.7_5 , __lowerCAmelCase = 0.0_2 , __lowerCAmelCase = 1.0 , __lowerCAmelCase = True , __lowerCAmelCase = [4, 8, 16, 32] , __lowerCAmelCase = None , **__lowerCAmelCase , ):
"""simple docstring"""
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.""" )
lowercase = CONFIG_MAPPING["""swin"""](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=__lowerCAmelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase = backbone_config.pop("""model_type""" )
lowercase = CONFIG_MAPPING[backbone_model_type]
lowercase = config_class.from_dict(__lowerCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
f'Supported model types: {",".join(self.backbones_supported )}' )
lowercase = backbone_config
lowercase = feature_size
lowercase = mask_feature_size
lowercase = hidden_dim
lowercase = encoder_feedforward_dim
lowercase = activation_function
lowercase = encoder_layers
lowercase = decoder_layers
lowercase = num_attention_heads
lowercase = dropout
lowercase = dim_feedforward
lowercase = pre_norm
lowercase = enforce_input_projection
lowercase = common_stride
lowercase = ignore_value
lowercase = num_queries
lowercase = no_object_weight
lowercase = class_weight
lowercase = mask_weight
lowercase = dice_weight
lowercase = train_num_points
lowercase = oversample_ratio
lowercase = importance_sample_ratio
lowercase = init_std
lowercase = init_xavier_std
lowercase = use_auxiliary_loss
lowercase = feature_strides
lowercase = output_auxiliary_logits
lowercase = decoder_layers
super().__init__(**__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return cls(
backbone_config=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
lowercase = copy.deepcopy(self.__dict__ )
lowercase = self.backbone_config.to_dict()
lowercase = self.__class__.model_type
return output
| 32
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
__lowerCAmelCase : List[Any] ={
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class _A ( lowerCAmelCase ):
snake_case__ : Dict = 'xlm-roberta-xl'
def __init__( self , __lowerCAmelCase=25_0880 , __lowerCAmelCase=2560 , __lowerCAmelCase=36 , __lowerCAmelCase=32 , __lowerCAmelCase=1_0240 , __lowerCAmelCase="gelu" , __lowerCAmelCase=0.1 , __lowerCAmelCase=0.1 , __lowerCAmelCase=514 , __lowerCAmelCase=1 , __lowerCAmelCase=0.0_2 , __lowerCAmelCase=1E-05 , __lowerCAmelCase=1 , __lowerCAmelCase=0 , __lowerCAmelCase=2 , __lowerCAmelCase="absolute" , __lowerCAmelCase=True , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase , **__lowerCAmelCase )
lowercase = vocab_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = hidden_act
lowercase = intermediate_size
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = position_embedding_type
lowercase = use_cache
lowercase = classifier_dropout
class _A ( lowerCAmelCase ):
@property
def A__ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 32
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def UpperCAmelCase__ ( lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
lowercase = s.rsplit(lowerCAmelCase__ , lowerCAmelCase__ )
return new.join(lowerCAmelCase__ )
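# Behavior sketch (editorial, hypothetical input): the rsplit/join pair above is
# a right-anchored replace limited to the last `occurrence` matches:
assert ".weight".join("a.w.b.w".rsplit(".w", 1)) == "a.w.b.weight"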
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = {}
lowercase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
lowercase = key.replace(f'{group_key}.' , f'{group_key}.group.' )
if "res_path" in key:
lowercase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
lowercase = rreplace(lowerCAmelCase__ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
lowercase = rreplace(lowerCAmelCase__ , """.b""" , """.bias""" , 1 )
lowercase = value.float()
return upgrade
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=None , lowerCAmelCase__ :Any=True ) -> Any:
'''simple docstring'''
from dall_e import Encoder
lowercase = Encoder()
if os.path.exists(lowerCAmelCase__ ):
lowercase = torch.load(lowerCAmelCase__ )
else:
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase__ )
if config_path is not None:
lowercase = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase__ )
else:
lowercase = FlavaImageCodebookConfig()
lowercase = FlavaImageCodebook(lowerCAmelCase__ ).eval()
lowercase = encoder.state_dict()
lowercase = upgrade_state_dict(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
lowercase = hf_model.state_dict()
lowercase = count_parameters(lowerCAmelCase__ )
lowercase = count_parameters(lowerCAmelCase__ )
assert torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase__ )
else:
return hf_state_dict
if __name__ == "__main__":
__lowerCAmelCase : Tuple =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCAmelCase : Any =parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
import math
import random
from typing import Any
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = []
lowercase = 0
lowercase = 0
def A__ ( self ):
"""simple docstring"""
return self.head == self.tail
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
self.data.append(__lowerCAmelCase )
lowercase = self.tail + 1
def A__ ( self ):
"""simple docstring"""
lowercase = self.data[self.head]
lowercase = self.head + 1
return ret
def A__ ( self ):
"""simple docstring"""
return self.tail - self.head
def A__ ( self ):
"""simple docstring"""
print(self.data )
print("""**************""" )
print(self.data[self.head : self.tail] )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = data
lowercase = None
lowercase = None
lowercase = 1
def A__ ( self ):
"""simple docstring"""
return self.data
def A__ ( self ):
"""simple docstring"""
return self.left
def A__ ( self ):
"""simple docstring"""
return self.right
def A__ ( self ):
"""simple docstring"""
return self.height
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = data
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = node
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = node
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = height
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode | None ) -> int:
'''simple docstring'''
if node is None:
return 0
return node.get_height()
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if a > b:
return a
return b
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode ) -> MyNode:
'''simple docstring'''
print("""left rotation node:""" , node.get_data() )
lowercase = node.get_left()
assert ret is not None
node.set_left(ret.get_right() )
ret.set_right(lowerCAmelCase__ )
lowercase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
lowercase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase__ )
return ret
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode ) -> MyNode:
'''simple docstring'''
print("""right rotation node:""" , node.get_data() )
lowercase = node.get_right()
assert ret is not None
node.set_right(ret.get_left() )
ret.set_left(lowerCAmelCase__ )
lowercase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
lowercase = my_max(get_height(ret.get_right() ) , get_height(ret.get_left() ) ) + 1
ret.set_height(lowerCAmelCase__ )
return ret
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode ) -> MyNode:
'''simple docstring'''
lowercase = node.get_left()
assert left_child is not None
node.set_left(left_rotation(lowerCAmelCase__ ) )
return right_rotation(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode ) -> MyNode:
'''simple docstring'''
lowercase = node.get_right()
assert right_child is not None
node.set_right(right_rotation(lowerCAmelCase__ ) )
return left_rotation(lowerCAmelCase__ )
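# Editorial note: lr_rotation / rl_rotation are the standard AVL double
# rotations: a left-right imbalance is first reduced to the left-left case by
# rotating the left child, then fixed with a single right rotation (and
# symmetrically for right-left).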
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode | None , lowerCAmelCase__ :Any ) -> MyNode | None:
'''simple docstring'''
if node is None:
return MyNode(lowerCAmelCase__ )
if data < node.get_data():
node.set_left(insert_node(node.get_left() , lowerCAmelCase__ ) )
if (
get_height(node.get_left() ) - get_height(node.get_right() ) == 2
): # an unbalance detected
lowercase = node.get_left()
assert left_child is not None
if (
data < left_child.get_data()
): # new node is the left child of the left child
lowercase = right_rotation(lowerCAmelCase__ )
else:
lowercase = lr_rotation(lowerCAmelCase__ )
else:
node.set_right(insert_node(node.get_right() , lowerCAmelCase__ ) )
if get_height(node.get_right() ) - get_height(node.get_left() ) == 2:
lowercase = node.get_right()
assert right_child is not None
if data < right_child.get_data():
lowercase = rl_rotation(lowerCAmelCase__ )
else:
lowercase = left_rotation(lowerCAmelCase__ )
lowercase = my_max(get_height(node.get_right() ) , get_height(node.get_left() ) ) + 1
node.set_height(lowerCAmelCase__ )
return node
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode ) -> Any:
'''simple docstring'''
while True:
lowercase = root.get_right()
if right_child is None:
break
lowercase = right_child
return root.get_data()
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode ) -> Any:
'''simple docstring'''
while True:
lowercase = root.get_left()
if left_child is None:
break
lowercase = left_child
return root.get_data()
def UpperCAmelCase__ ( lowerCAmelCase__ :MyNode , lowerCAmelCase__ :Any ) -> MyNode | None:
'''simple docstring'''
lowercase = root.get_left()
lowercase = root.get_right()
if root.get_data() == data:
if left_child is not None and right_child is not None:
lowercase = get_left_most(lowerCAmelCase__ )
root.set_data(lowerCAmelCase__ )
root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
elif left_child is not None:
lowercase = left_child
elif right_child is not None:
lowercase = right_child
else:
return None
elif root.get_data() > data:
if left_child is None:
print("""No such data""" )
return root
else:
root.set_left(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
else: # root.get_data() < data
if right_child is None:
return root
else:
root.set_right(del_node(lowerCAmelCase__ , lowerCAmelCase__ ) )
if get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == 2:
assert right_child is not None
if get_height(right_child.get_right() ) > get_height(right_child.get_left() ):
lowercase = left_rotation(lowerCAmelCase__ )
else:
lowercase = rl_rotation(lowerCAmelCase__ )
elif get_height(lowerCAmelCase__ ) - get_height(lowerCAmelCase__ ) == -2:
assert left_child is not None
if get_height(left_child.get_left() ) > get_height(left_child.get_right() ):
lowercase = right_rotation(lowerCAmelCase__ )
else:
lowercase = lr_rotation(lowerCAmelCase__ )
lowercase = my_max(get_height(root.get_right() ) , get_height(root.get_left() ) ) + 1
root.set_height(lowerCAmelCase__ )
return root
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = None
def A__ ( self ):
"""simple docstring"""
return get_height(self.root )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
print("""insert:""" + str(__lowerCAmelCase ) )
lowercase = insert_node(self.root , __lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
print("""delete:""" + str(__lowerCAmelCase ) )
if self.root is None:
print("""Tree is empty!""" )
return
lowercase = del_node(self.root , __lowerCAmelCase )
    def __str__( self , ): # a level-order traversal gives a more intuitive look at the tree
"""simple docstring"""
lowercase = """"""
lowercase = MyQueue()
q.push(self.root )
lowercase = self.get_height()
if layer == 0:
return output
lowercase = 0
while not q.is_empty():
lowercase = q.pop()
lowercase = """ """ * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(__lowerCAmelCase )
q.push(__lowerCAmelCase )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
lowercase = cnt + 1
for i in range(100 ):
if cnt == math.pow(2 , __lowerCAmelCase ) - 1:
lowercase = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def UpperCAmelCase__ ( ) -> None:
'''simple docstring'''
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
__lowerCAmelCase : Any =AVLtree()
__lowerCAmelCase : int =list(range(1_0))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 32
|
"""simple docstring"""
import enum
import shutil
import sys
__lowerCAmelCase , __lowerCAmelCase : List[str] =shutil.get_terminal_size()
__lowerCAmelCase : Union[str, Any] ={"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class _A ( enum.Enum ):
snake_case__ : Tuple = 0
snake_case__ : List[str] = 1
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]="" ) -> List[Any]:
'''simple docstring'''
sys.stdout.write(str(lowerCAmelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[Any]="" ) -> Optional[Any]:
'''simple docstring'''
forceWrite(f'\u001b[{color}m{content}\u001b[0m' , lowerCAmelCase__ )
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
forceWrite("""\r""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}' )
def UpperCAmelCase__ ( ) -> int:
'''simple docstring'''
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase__ ( ) -> Dict:
'''simple docstring'''
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 32
| 1
|
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__lowerCAmelCase : Dict ={
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> str:
'''simple docstring'''
lowercase = {}
state_dict.pop("""pixel_mean""" , lowerCAmelCase__ )
state_dict.pop("""pixel_std""" , lowerCAmelCase__ )
lowercase = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowercase = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
lowercase = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(2 ) )
if layer_nb == 0:
lowercase = key.replace("""layers.0""" , """proj_in""" )
elif layer_nb == 1:
lowercase = key.replace("""layers.1""" , """layers.0""" )
elif layer_nb == 2:
lowercase = key.replace("""layers.2""" , """proj_out""" )
lowercase = value
lowercase = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
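# Editorial note: the regex above matches keys such as
# "mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"; group(2) is the
# inner layer index, which the branches rename to proj_in / layers.0 / proj_out.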
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[int]="ybelkada/segment-anything" ) -> List[Any]:
'''simple docstring'''
lowercase = hf_hub_download(lowerCAmelCase__ , f'checkpoints/{model_name}.pth' )
if "sam_vit_b" in model_name:
lowercase = SamConfig()
elif "sam_vit_l" in model_name:
lowercase = SamVisionConfig(
hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , global_attn_indexes=[5, 1_1, 1_7, 2_3] , )
lowercase = SamConfig(
vision_config=lowerCAmelCase__ , )
elif "sam_vit_h" in model_name:
lowercase = SamVisionConfig(
hidden_size=1_2_8_0 , num_hidden_layers=3_2 , num_attention_heads=1_6 , global_attn_indexes=[7, 1_5, 2_3, 3_1] , )
lowercase = SamConfig(
vision_config=lowerCAmelCase__ , )
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )
lowercase = replace_keys(lowerCAmelCase__ )
lowercase = SamImageProcessor()
lowercase = SamProcessor(image_processor=lowerCAmelCase__ )
lowercase = SamModel(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
lowercase = hf_model.to("""cuda""" )
lowercase = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
lowercase = [[[4_0_0, 6_5_0]]]
lowercase = [[1]]
lowercase = processor(images=np.array(lowerCAmelCase__ ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase = hf_model(**lowerCAmelCase__ )
lowercase = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
lowercase = processor(
images=np.array(lowerCAmelCase__ ) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase = hf_model(**lowerCAmelCase__ )
lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
lowercase = ((7_5, 2_7_5, 1_7_2_5, 8_5_0),)
lowercase = processor(images=np.array(lowerCAmelCase__ ) , input_boxes=lowerCAmelCase__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase = hf_model(**lowerCAmelCase__ )
lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
lowercase = [[[4_0_0, 6_5_0], [8_0_0, 6_5_0]]]
lowercase = [[1, 1]]
lowercase = processor(
images=np.array(lowerCAmelCase__ ) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase = hf_model(**lowerCAmelCase__ )
lowercase = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
__lowerCAmelCase : Any =argparse.ArgumentParser()
__lowerCAmelCase : List[Any] =["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
__lowerCAmelCase : Dict =parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise TypeError("""only integers accepted as input""" )
else:
lowercase = str(abs(lowerCAmelCase__ ) )
        lowercase = [list(lowerCAmelCase__ ) for _ in range(len(lowerCAmelCase__ ) )]
for index in range(len(lowerCAmelCase__ ) ):
num_transpositions[index].pop(lowerCAmelCase__ )
return max(
int("""""".join(list(lowerCAmelCase__ ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 32
| 1
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[Any] = RobertaTokenizer
snake_case__ : Dict = RobertaTokenizerFast
snake_case__ : List[str] = True
snake_case__ : List[str] = {'cls_token': '<s>'}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__lowerCAmelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """lower newer"""
lowercase = """lower newer"""
return input_text, output_text
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase = """lower newer"""
lowercase = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=__lowerCAmelCase ) , [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=__lowerCAmelCase ) , [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2] , )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""roberta-base""" )
lowercase = tokenizer.encode("""sequence builders""" , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.encode(
"""sequence builders""" , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowercase = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowercase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase )
lowercase = tokenizer.build_inputs_with_special_tokens(__lowerCAmelCase , __lowerCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
lowercase = """Encode this sequence."""
lowercase = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing spaces after special tokens
lowercase = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase )} ) # mask token has a left space
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
lowercase = """Encode <mask> sequence"""
lowercase = """Encode <mask>sequence"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = encoded.index(__lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = encoded.index(__lowerCAmelCase )
lowercase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowercase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowercase = """A, <mask> AllenNLP sentence."""
lowercase = tokenizer_r.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
lowercase = tokenizer_p.encode_plus(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
                # attention_mask should put 1 everywhere, so its sum divided by the length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowercase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowercase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__lowerCAmelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def A__ ( self ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowercase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , __lowerCAmelCase )
self.assertEqual(post_processor_state["""add_prefix_space"""] , __lowerCAmelCase )
self.assertEqual(post_processor_state["""trim_offsets"""] , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
lowercase = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
lowercase = f'{text_of_1_token} {text_of_1_token}'
lowercase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowercase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowercase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ) + 1, len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowercase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowercase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCAmelCase ), len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowercase = f' {text}'
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowercase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ) + 1, 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowercase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
lowercase = self.rust_tokenizer_class.from_pretrained(
__lowerCAmelCase , use_fast=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase )
lowercase = tokenizer_r(__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCAmelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCAmelCase ), 1 + len(__lowerCAmelCase ) + 1 + len(__lowerCAmelCase )) , )
| 32
|
"""simple docstring"""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowerCAmelCase : List[Any] =numpy.array([0, 0])
__lowerCAmelCase : List[str] =numpy.array([0.5, 0.866_0254])
__lowerCAmelCase : List[Any] =numpy.array([1, 0])
__lowerCAmelCase : int =[VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] , lowerCAmelCase__ :int ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = initial_vectors
for _ in range(lowerCAmelCase__ ):
lowercase = iteration_step(lowerCAmelCase__ )
return vectors
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> list[numpy.ndarray]:
'''simple docstring'''
lowercase = []
for i, start_vector in enumerate(vectors[:-1] ):
lowercase = vectors[i + 1]
new_vectors.append(lowerCAmelCase__ )
lowercase = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 6_0 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
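# A quick note on growth (added commentary, not from the original file):
# each pass replaces every one of the n - 1 segments with four shorter
# ones, so a list of n points becomes 4 * (n - 1) + 1 points. Starting
# from the 4 initial points, five iterations yield 13, 49, 193, 769, and
# finally 3073 points.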
def UpperCAmelCase__ ( lowerCAmelCase__ :numpy.ndarray , lowerCAmelCase__ :float ) -> numpy.ndarray:
'''simple docstring'''
lowercase = numpy.radians(lowerCAmelCase__ )
lowercase , lowercase = numpy.cos(lowerCAmelCase__ ), numpy.sin(lowerCAmelCase__ )
lowercase = numpy.array(((c, -s), (s, c)) )
return numpy.dot(lowerCAmelCase__ , lowerCAmelCase__ )
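# Sanity check (added commentary, not from the original file): the matrix
# ((cos a, -sin a), (sin a, cos a)) built above is the standard 2D
# rotation matrix, so dotting it with the unit vector (1, 0) for a
# 90-degree angle yields approximately (0, 1), up to floating-point
# error in cos(pi / 2).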
def UpperCAmelCase__ ( lowerCAmelCase__ :list[numpy.ndarray] ) -> None:
'''simple docstring'''
lowercase = plt.gca()
axes.set_aspect("""equal""" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
lowercase , lowercase = zip(*lowerCAmelCase__ )
plt.plot(lowerCAmelCase__ , lowerCAmelCase__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : Optional[int] =iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 32
| 1
|
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCAmelCase : Optional[Any] =re.compile(R"""\s+""")
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> str:
'''simple docstring'''
return {"hash": hashlib.mda(re.sub(lowerCAmelCase__ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = [len(lowerCAmelCase__ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(lowerCAmelCase__ ), "line_max": max(lowerCAmelCase__ )}
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> Optional[int]:
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=5 ) -> Optional[int]:
'''simple docstring'''
lowercase = ["""auto-generated""", """autogenerated""", """automatically generated"""]
lowercase = example["""content"""].splitlines()
for _, line in zip(range(lowerCAmelCase__ ) , lowerCAmelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Union[str, Any]=5 , lowerCAmelCase__ :Dict=0.05 ) -> str:
'''simple docstring'''
lowercase = ["""unit tests""", """test file""", """configuration file"""]
lowercase = example["""content"""].splitlines()
lowercase = 0
lowercase = 0
# first test
for _, line in zip(range(lowerCAmelCase__ ) , lowerCAmelCase__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
lowercase = example["""content"""].count("""\n""" )
lowercase = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] ) -> Any:
'''simple docstring'''
lowercase = ["""def """, """class """, """for """, """while """]
lowercase = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any]=4 ) -> Optional[int]:
'''simple docstring'''
lowercase = example["""content"""].splitlines()
lowercase = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] ) -> Any:
'''simple docstring'''
lowercase = tokenizer(example["""content"""] , truncation=lowerCAmelCase__ )["""input_ids"""]
lowercase = len(example["""content"""] ) / len(lowerCAmelCase__ )
return {"ratio": ratio}
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> List[str]:
'''simple docstring'''
lowercase = {}
results.update(get_hash(lowerCAmelCase__ ) )
results.update(line_stats(lowerCAmelCase__ ) )
results.update(alpha_stats(lowerCAmelCase__ ) )
results.update(char_token_ratio(lowerCAmelCase__ ) )
results.update(is_autogenerated(lowerCAmelCase__ ) )
results.update(is_config_or_test(lowerCAmelCase__ ) )
results.update(has_no_keywords(lowerCAmelCase__ ) )
results.update(has_few_assignments(lowerCAmelCase__ ) )
return results
def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> str:
'''simple docstring'''
if not check_uniques(lowerCAmelCase__ , lowerCAmelCase__ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
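# Summary of the cascade above (added commentary, not from the original
# file): a sample survives only if it is the first occurrence of its hash
# and passes every heuristic; samples flagged as config/test files or as
# having no keywords are dropped probabilistically, each with probability
# `args.filter_proba`.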
def UpperCAmelCase__ ( lowerCAmelCase__ :Dict ) -> List[Any]:
'''simple docstring'''
with open(lowerCAmelCase__ , """rb""" ) as f_in:
with gzip.open(str(lowerCAmelCase__ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCAmelCase__ , lowerCAmelCase__ )
os.unlink(lowerCAmelCase__ )
# Settings
__lowerCAmelCase : List[Any] =HfArgumentParser(PreprocessingArguments)
__lowerCAmelCase : Union[str, Any] =parser.parse_args()
if args.num_workers is None:
__lowerCAmelCase : Dict =multiprocessing.cpu_count()
__lowerCAmelCase : List[str] =AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCAmelCase : Union[str, Any] =time.time()
__lowerCAmelCase : Union[str, Any] =load_dataset(args.dataset_name, split="""train""")
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
__lowerCAmelCase : Tuple =time.time()
__lowerCAmelCase : str =ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
__lowerCAmelCase : int =set(ds.unique("""hash"""))
__lowerCAmelCase : Union[str, Any] =len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
__lowerCAmelCase : Union[str, Any] =time.time()
__lowerCAmelCase : Tuple =ds.filter(filter, fn_kwargs={"""uniques""": uniques, """args""": args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__lowerCAmelCase : str =time.time()
__lowerCAmelCase , __lowerCAmelCase : int =deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
__lowerCAmelCase : Tuple =Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place to save it
if args.near_deduplication:
with open(output_dir / """duplicate_clusters.json""", """w""") as f:
json.dump(duplicate_clusters, f)
__lowerCAmelCase : int =output_dir / """data"""
data_dir.mkdir(exist_ok=True)
__lowerCAmelCase : Optional[int] =time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__lowerCAmelCase : List[str] =str(data_dir / F"""file-{file_number+1:012}.json""")
__lowerCAmelCase : Optional[Any] =min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = credit_card_number
lowercase = 0
lowercase = len(lowerCAmelCase__ ) - 2
for i in range(lowerCAmelCase__ , -1 , -2 ):
# double the value of every second digit
lowercase = int(cc_number[i] )
digit *= 2
# If doubling a digit results in a two-digit number,
# i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
# to get a single-digit number.
if digit > 9:
digit %= 1_0
digit += 1
lowercase = cc_number[:i] + str(lowerCAmelCase__ ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(lowerCAmelCase__ ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 1_0 == 0
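# Worked example (added commentary, not from the original file): for "59",
# the second-from-right digit 5 doubles to 10; since 10 > 9 it reduces to
# 10 % 10 + 1 = 1 (equivalent to subtracting 9). Adding the remaining
# digit 9 gives a total of 10, and 10 % 10 == 0, so "59" passes the check.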
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> bool:
'''simple docstring'''
lowercase = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 1_3 <= len(lowerCAmelCase__ ) <= 1_6:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(lowerCAmelCase__ ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(lowerCAmelCase__ ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 32
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Dict ={
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
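# Note (added commentary, not from the original file): `_LazyModule`
# defers the heavy torch imports until an attribute is actually accessed,
# while the `TYPE_CHECKING` branch above gives static type checkers the
# real symbols without any runtime cost.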
| 32
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ):
"""simple docstring"""
lowercase = 1
lowercase = 3
lowercase = (32, 32)
lowercase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowerCAmelCase )
return image
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
def extract(*__lowerCAmelCase , **__lowerCAmelCase ):
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = torch.ones([0] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
self.pixel_values.to(__lowerCAmelCase )
return self
return Out()
return extract
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# assemble the pipeline from the dummy components (DDIM scheduler here)
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe([prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
lowercase = output.images
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(0 )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=__lowerCAmelCase )
assert isinstance(__lowerCAmelCase , __lowerCAmelCase )
assert isinstance(pipe.scheduler , __lowerCAmelCase )
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__lowerCAmelCase )
lowercase = StableDiffusionPipeline.from_pretrained(__lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
lowercase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_cond_unet
lowercase = PNDMScheduler(skip_prk_steps=__lowerCAmelCase )
lowercase = self.dummy_vae
lowercase = self.dummy_text_encoder
lowercase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
lowercase = unet.half()
lowercase = vae.half()
lowercase = bert.half()
# make sure here that pndm scheduler skips prk
lowercase = StableDiffusionPipeline(
unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , safety_checker=__lowerCAmelCase , feature_extractor=self.dummy_extractor , )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """A painting of a squirrel eating a burger"""
lowercase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
lowercase = 40_0366_0346
lowercase = 7
# without safety guidance (sld_guidance_scale = 0)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance (strong configuration)
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=__lowerCAmelCase )
lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = """padme amidala taking a bath artwork, safe for work, no nudity"""
lowercase = 27_3497_1755
lowercase = 7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def A__ ( self ):
"""simple docstring"""
lowercase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
lowercase = sd_pipe.to(__lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
lowercase = 10_4435_5234
lowercase = 12
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
lowercase = torch.manual_seed(__lowerCAmelCase )
lowercase = sd_pipe(
[prompt] , generator=__lowerCAmelCase , guidance_scale=__lowerCAmelCase , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
lowercase = output.images
lowercase = image[0, -3:, -3:, -1]
lowercase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 32
| 1
|
"""simple docstring"""
import math
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
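# Worked example (added commentary, not from the original file): for 29,
# sqrt(29) is about 5.39, so the loop only tests i = 5; 29 % 5 != 0 and
# 29 % 7 != 0, hence 29 is reported prime. Testing only 6k +/- 1
# candidates skips the multiples of 2 and 3 already excluded above.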
def UpperCAmelCase__ ( lowerCAmelCase__ :float = 0.1 ) -> int:
'''simple docstring'''
lowercase = 3
lowercase = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(lowerCAmelCase__ )
j += 2
return j
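# Note (added commentary, not from the original file): this walks the
# Ulam spiral one square ring at a time (Project Euler 58). A ring of
# side j + 2 contributes the three corners j*j + (j + 1) * k for
# k = 1, 2, 3; the fourth corner (j + 2)**2 is a perfect square and never
# prime. 2 * j - 1 counts the diagonal values seen so far, and the loop
# stops once the prime fraction drops below `ratio`.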
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
# If the first term is 0, the row is already in the form we want, so preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
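# Note (added commentary, not from the original file): `simplify`
# performs one round of forward elimination -- each row is scaled so its
# leading coefficient is 1, the scaled rows are subtracted from the first
# row to zero out the leading column, and the routine recurses on the
# remaining sub-system; the solver below then reverses the result and
# back-substitutes.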
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 32
| 1
|
"""simple docstring"""
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ):
"""simple docstring"""
super().__init__()
lowercase = value_function
lowercase = unet
lowercase = scheduler
lowercase = env
lowercase = env.get_dataset()
lowercase = {}
for key in self.data.keys():
try:
lowercase = self.data[key].mean()
except: # noqa: E722
pass
lowercase = {}
for key in self.data.keys():
try:
lowercase = self.data[key].std()
except: # noqa: E722
pass
lowercase = env.observation_space.shape[0]
lowercase = env.action_space.shape[0]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if type(__lowerCAmelCase ) is dict:
return {k: self.to_torch(__lowerCAmelCase ) for k, v in x_in.items()}
elif torch.is_tensor(__lowerCAmelCase ):
return x_in.to(self.unet.device )
return torch.tensor(__lowerCAmelCase , device=self.unet.device )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
for key, val in cond.items():
lowercase = val.clone()
return x_in
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = x.shape[0]
lowercase = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase = torch.full((batch_size,) , __lowerCAmelCase , device=self.unet.device , dtype=torch.long )
for _ in range(__lowerCAmelCase ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase = self.value_function(x.permute(0 , 2 , 1 ) , __lowerCAmelCase ).sample
lowercase = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase = self.scheduler._get_variance(__lowerCAmelCase )
lowercase = torch.exp(0.5 * posterior_variance )
lowercase = model_std * grad
lowercase = 0
lowercase = x.detach()
lowercase = x + scale * grad
lowercase = self.reset_xa(__lowerCAmelCase , __lowerCAmelCase , self.action_dim )
lowercase = self.unet(x.permute(0 , 2 , 1 ) , __lowerCAmelCase ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , predict_epsilon=__lowerCAmelCase )["""prev_sample"""]
# apply conditions to the trajectory (set the initial state)
lowercase = self.reset_xa(__lowerCAmelCase , __lowerCAmelCase , self.action_dim )
lowercase = self.to_torch(__lowerCAmelCase )
return x, y
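# Note (added commentary, not from the original file): the loop above
# implements value-guided sampling -- the gradient of the predicted value
# with respect to the noisy trajectory, scaled via the scheduler's
# reported posterior variance, is added to the sample so each denoising
# step drifts toward high-value plans before the scheduler update.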
def __call__( self , __lowerCAmelCase , __lowerCAmelCase=64 , __lowerCAmelCase=32 , __lowerCAmelCase=2 , __lowerCAmelCase=0.1 ):
"""simple docstring"""
lowercase = self.normalize(__lowerCAmelCase , """observations""" )
lowercase = obs[None].repeat(__lowerCAmelCase , axis=0 )
lowercase = {0: self.to_torch(__lowerCAmelCase )}
lowercase = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase = randn_tensor(__lowerCAmelCase , device=self.unet.device )
lowercase = self.reset_xa(__lowerCAmelCase , __lowerCAmelCase , self.action_dim )
lowercase = self.to_torch(__lowerCAmelCase )
# run the diffusion process
lowercase , lowercase = self.run_diffusion(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# sort output trajectories by value
lowercase = y.argsort(0 , descending=__lowerCAmelCase ).squeeze()
lowercase = x[sorted_idx]
lowercase = sorted_values[:, :, : self.action_dim]
lowercase = actions.detach().cpu().numpy()
lowercase = self.de_normalize(__lowerCAmelCase , key="""actions""" )
# select the action with the highest value
if y is not None:
lowercase = 0
else:
# if we didn't run value guiding, select a random action
lowercase = np.random.randint(0 , __lowerCAmelCase )
lowercase = denorm_actions[selected_index, 0]
return denorm_actions
| 32
|
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
__lowerCAmelCase : Union[str, Any] =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self , __lowerCAmelCase=None ):
"""simple docstring"""
lowercase = {}
if top_k is not None:
lowercase = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return super().__call__(__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = load_image(__lowerCAmelCase )
lowercase = self.image_processor(images=__lowerCAmelCase , return_tensors=self.framework )
return model_inputs
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.model(**__lowerCAmelCase )
return model_outputs
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
lowercase = self.model.config.num_labels
if self.framework == "pt":
lowercase = model_outputs.logits.softmax(-1 )[0]
lowercase , lowercase = probs.topk(__lowerCAmelCase )
elif self.framework == "tf":
lowercase = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowercase = tf.math.top_k(__lowerCAmelCase , k=__lowerCAmelCase )
lowercase , lowercase = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
lowercase = scores.tolist()
lowercase = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__lowerCAmelCase , __lowerCAmelCase )]
| 32
| 1
|
"""simple docstring"""
__lowerCAmelCase : Optional[int] ="""ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def UpperCAmelCase__ ( ) -> None:
'''simple docstring'''
lowercase = input("""Enter message: """ )
lowercase = input("""Enter key [alphanumeric]: """ )
lowercase = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase = """encrypt"""
lowercase = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase = """decrypt"""
lowercase = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'\n{mode.title()}ed message:' )
print(lowerCAmelCase__ )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
return translate_message(lowerCAmelCase__ , lowerCAmelCase__ , """encrypt""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
return translate_message(lowerCAmelCase__ , lowerCAmelCase__ , """decrypt""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
lowercase = []
lowercase = 0
lowercase = key.upper()
for symbol in message:
lowercase = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(lowerCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(lowerCAmelCase__ ):
lowercase = 0
else:
translated.append(lowerCAmelCase__ )
return "".join(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 32
|
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
| 32
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__lowerCAmelCase : int =logging.get_logger(__name__)
class _A ( lowerCAmelCase ):
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
warnings.warn(
"""The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PoolFormerImageProcessor instead.""" , __lowerCAmelCase , )
super().__init__(*__lowerCAmelCase , **__lowerCAmelCase )
| 32
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : Optional[int] = GPTSanJapaneseTokenizer
snake_case__ : int = False
snake_case__ : Tuple = {'do_clean_text': False, 'add_prefix_space': False}
def A__ ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
lowercase = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
# fmt: on
lowercase = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
lowercase = {"""unk_token""": """<unk>"""}
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
with open(self.emoji_file , """w""" ) as emoji_writer:
emoji_writer.write(json.dumps(__lowerCAmelCase ) )
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。 \nこんばんは、世界。😀"""
return input_text, output_text
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.get_input_output_texts(__lowerCAmelCase )
lowercase = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
return text, ids
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
pass # TODO add if relevant
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、世界。 こんばんは、㔺界。"""
lowercase = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
lowercase = tokenizer.tokenize(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids without special tokens
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
# Testing conversion to ids with special tokens
lowercase = tokens + [tokenizer.unk_token]
lowercase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
lowercase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.get_tokenizer()
# Testing tokenization
lowercase = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
lowercase = """こんにちは、、、、世界。こんばんは、、、、世界。"""
lowercase = tokenizer.encode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = """こんにちは、世界。こんばんは、世界。😀"""
lowercase = tokenizer.encode(prefix_text + input_text )
lowercase = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
lowercase = tokenizer.encode(__lowerCAmelCase , prefix_text=__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
lowercase = tokenizer.decode(__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
# Testing tokenization
lowercase = """こんにちは、世界。"""
lowercase = """こんばんは、㔺界。😀"""
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = len(tokenizer.encode(__lowerCAmelCase ) ) - 2
lowercase = [1] + [0] * (len_prefix + len_text + 1)
lowercase = [1] * (len_prefix + len_text + 1) + [0]
lowercase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
lowercase = tokenizer(prefix_text + input_text ).token_type_ids
lowercase = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
lowercase = tokenizer(__lowerCAmelCase , prefix_text=__lowerCAmelCase ).token_type_ids
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = tokenizer.encode("""あンいワ""" )
lowercase = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
lowercase = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertEqual(tokenizer.decode(__lowerCAmelCase ) , tokenizer.decode(__lowerCAmelCase ) )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
lowercase = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
lowercase = tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase )
lowercase = tokenizer.batch_encode_plus(__lowerCAmelCase , padding=__lowerCAmelCase )
# fmt: off
lowercase = [[3_5993, 8640, 2_5948, 3_5998, 3_0647, 3_5675, 3_5999, 3_5999], [3_5993, 1_0382, 9868, 3_5998, 3_0646, 9459, 3_0646, 3_5675]]
lowercase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
lowercase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token.attention_mask , __lowerCAmelCase )
self.assertListEqual(x_token_a.input_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.token_type_ids , __lowerCAmelCase )
self.assertListEqual(x_token_a.attention_mask , __lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self ):
"""simple docstring"""
pass
| 32
| 1
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( lowerCAmelCase__ :list[int] , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
if len(lowerCAmelCase__ ) < k or k < 0:
raise ValueError("""Invalid Input""" )
lowercase = lowercase = sum(array[:k] )
for i in range(len(lowerCAmelCase__ ) - k ):
lowercase = current_sum - array[i] + array[i + k]
lowercase = max(lowerCAmelCase__ , lowerCAmelCase__ )
return max_sum
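# Worked example (added commentary, not from the original file): for
# array = [1, 2, 3, 4] and k = 2 the window sums are 3, 5, 7, so the
# answer is 7. Each step reuses the previous sum (subtract the element
# leaving the window, add the one entering), so the whole scan is O(n)
# rather than O(n * k).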
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
__lowerCAmelCase : Union[str, Any] =[randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
__lowerCAmelCase : Tuple =randint(0, 1_1_0)
print(F"""The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}""")
| 32
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : List[str] ={"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[Any] =["""ViTFeatureExtractor"""]
__lowerCAmelCase : List[str] =["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str =[
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any =[
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Dict =[
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 32
| 1
|
"""simple docstring"""
__lowerCAmelCase : List[str] =[4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowerCAmelCase : Any =[3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
__lowerCAmelCase : str ={
0: """Sunday""",
1: """Monday""",
2: """Tuesday""",
3: """Wednesday""",
4: """Thursday""",
5: """Friday""",
6: """Saturday""",
}
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> str:
'''simple docstring'''
assert len(str(lowerCAmelCase__ ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 1_2, "month should be between 1 to 12"
assert 1 <= day <= 3_1, "day should be between 1 to 31"
# Doomsday algorithm:
lowercase = year // 1_0_0
lowercase = (5 * (century % 4) + 2) % 7
lowercase = year % 1_0_0
lowercase = centurian % 1_2
lowercase = (
(centurian // 1_2) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
lowercase = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 4_0_0) != 0)
else DOOMSDAY_LEAP[month - 1]
)
lowercase = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
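# Worked example (added commentary, not from the original file): for
# 2020-10-24, century = 20 gives century_anchor = 2; centurian = 20 gives
# dooms_day = (1 + 8 + 2 + 2) % 7 = 6; 2020 is a leap year, so the
# October anchor is 3, and (6 + 24 - 3) % 7 = 6 maps to "Saturday".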
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = True , __lowerCAmelCase = "arrow" , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = load_from_cache_file
lowercase = file_format
lowercase = Spark(
df=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , working_dir=__lowerCAmelCase , **__lowerCAmelCase , )
def A__ ( self ):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowercase = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__lowerCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 32
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( lowerCAmelCase ):
snake_case__ : Optional[int] = ['image_processor', 'tokenizer']
snake_case__ : Optional[Any] = 'CLIPImageProcessor'
snake_case__ : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __lowerCAmelCase , )
lowercase = kwargs.pop("""feature_extractor""" )
lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
lowercase = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
lowercase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
lowercase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def A__ ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def A__ ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def A__ ( self ):
"""simple docstring"""
lowercase = self.tokenizer.model_input_names
lowercase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A__ ( self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowerCAmelCase , )
return self.image_processor_class
@property
def A__ ( self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowerCAmelCase , )
return self.image_processor
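
A short usage sketch of this processor through the public CLIPProcessor class it implements; the model id is illustrative and the image is a blank placeholder.

# Usage sketch (assumes transformers, Pillow, and Hub access).
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.new("RGB", (224, 224))  # blank placeholder image
inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']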
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = Mock()
lowercase = conn, Mock()
lowercase = iter([1, None] )
lowercase = lambda lowerCAmelCase__ : next(lowerCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=lowerCAmelCase__ )
    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
"""simple docstring"""
__lowerCAmelCase : int ={
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[Any] =logging.get_logger(__name__)
def UpperCAmelCase__ ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def UpperCAmelCase__ ( lowerCAmelCase__ :str , lowerCAmelCase__ :Any ) -> Dict:
'''simple docstring'''
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
lowercase = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
lowercase = in_proj_weight[
: encoder_config.hidden_size, :
]
lowercase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
lowercase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :int ) -> Union[str, Any]:
'''simple docstring'''
lowercase = dct.pop(lowerCAmelCase__ )
lowercase = val
def UpperCAmelCase__ ( lowerCAmelCase__ :List[Any] ) -> List[Any]:
'''simple docstring'''
if "handwritten" in checkpoint_url:
lowercase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
lowercase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase__ )
lowercase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
lowercase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
lowercase = 1_0_2_4
lowercase = 4_0_9_6
lowercase = 2_4
lowercase = 1_6
lowercase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
lowercase = False
lowercase = """relu"""
lowercase = 1_0_2_4
lowercase = True
lowercase = False
lowercase = False
# load HuggingFace model
lowercase = ViTModel(lowerCAmelCase__ , add_pooling_layer=lowerCAmelCase__ )
lowercase = TrOCRForCausalLM(lowerCAmelCase__ )
lowercase = VisionEncoderDecoderModel(encoder=lowerCAmelCase__ , decoder=lowerCAmelCase__ )
model.eval()
# load state_dict of original model, rename some keys
lowercase = torch.hub.load_state_dict_from_url(lowerCAmelCase__ , map_location="""cpu""" , check_hash=lowerCAmelCase__ )["""model"""]
lowercase = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
lowercase = state_dict.pop(lowerCAmelCase__ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
lowercase = val
else:
lowercase = val
# load state dict
model.load_state_dict(lowerCAmelCase__ )
# Check outputs on an image
lowercase = ViTImageProcessor(size=encoder_config.image_size )
lowercase = RobertaTokenizer.from_pretrained("""roberta-large""" )
lowercase = TrOCRProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = processor(images=prepare_img(lowerCAmelCase__ ) , return_tensors="""pt""" ).pixel_values
# verify logits
lowercase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
lowercase = model(pixel_values=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
lowercase = outputs.logits
lowercase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311] )
elif "trocr-large-handwritten" in checkpoint_url:
lowercase = torch.tensor(
[-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170] )
elif "trocr-base-printed" in checkpoint_url:
lowercase = torch.tensor(
[-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210] )
elif "trocr-large-printed" in checkpoint_url:
lowercase = torch.tensor(
[-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase__ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] =argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__lowerCAmelCase : Dict =parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
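
A hedged invocation example for the conversion script above; the script file name and output directory are hypothetical, while the two flags are the ones defined by the parser.

# Hypothetical invocation (script name and paths are illustrative):
# python convert_trocr_unilm_to_pytorch.py \
#     --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#     --pytorch_dump_folder_path ./trocr-base-handwritten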
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCAmelCase : int =logging.get_logger(__name__)
__lowerCAmelCase : int ={
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class _A ( lowerCAmelCase , lowerCAmelCase ):
snake_case__ : Tuple = 'bit'
snake_case__ : Union[str, Any] = ['preactivation', 'bottleneck']
snake_case__ : Optional[Any] = ['SAME', 'VALID']
def __init__( self , __lowerCAmelCase=3 , __lowerCAmelCase=64 , __lowerCAmelCase=[256, 512, 1024, 2048] , __lowerCAmelCase=[3, 4, 6, 3] , __lowerCAmelCase="preactivation" , __lowerCAmelCase="relu" , __lowerCAmelCase=None , __lowerCAmelCase=32 , __lowerCAmelCase=0.0 , __lowerCAmelCase=False , __lowerCAmelCase=32 , __lowerCAmelCase=1 , __lowerCAmelCase=None , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(**__lowerCAmelCase )
if layer_type not in self.layer_types:
raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types )}' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
lowercase = global_padding.upper()
else:
raise ValueError(f'Padding strategy {global_padding} not supported' )
lowercase = num_channels
lowercase = embedding_size
lowercase = hidden_sizes
lowercase = depths
lowercase = layer_type
lowercase = hidden_act
lowercase = global_padding
lowercase = num_groups
lowercase = drop_path_rate
lowercase = embedding_dynamic_padding
lowercase = output_stride
lowercase = width_factor
lowercase = ["""stem"""] + [f'stage{idx}' for idx in range(1 , len(__lowerCAmelCase ) + 1 )]
lowercase , lowercase = get_aligned_output_features_output_indices(
out_features=__lowerCAmelCase , out_indices=__lowerCAmelCase , stage_names=self.stage_names )
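
A brief sketch instantiating this configuration through the public transformers API; the keyword values are illustrative choices, not requirements.

# Sketch (assumes a transformers version that ships BiT); mirrors the config class above.
from transformers import BitConfig, BitModel

config = BitConfig(layer_type="preactivation", global_padding="SAME", out_features=["stage4"])
model = BitModel(config)  # randomly initialized, no weights downloaded
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']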
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :int ) -> bool:
'''simple docstring'''
lowercase = n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
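
Note that the float cube root above can misfire on exact cubes (27 ** (1 / 3) is typically 3.0000000000000004, so the check may wrongly report False for 27); a rounding-based variant avoids this, sketched below.

def perfect_cube_exact(n: int) -> bool:
    # Round the float cube root, then confirm with exact integer arithmetic.
    if n < 0:
        return perfect_cube_exact(-n)
    root = round(n ** (1 / 3))
    return any((root + d) ** 3 == n for d in (-1, 0, 1))

assert perfect_cube_exact(27) and not perfect_cube_exact(4)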
"""simple docstring"""
import operator
def UpperCAmelCase__ ( lowerCAmelCase__ :list , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :list | None = None ) -> list:
'''simple docstring'''
lowercase = operator.lt if reverse else operator.gt
lowercase = solution or []
if not arr:
return solution
lowercase = [arr.pop(0 )]
for i, item in enumerate(lowerCAmelCase__ ):
if _operator(lowerCAmelCase__ , sublist[-1] ):
sublist.append(lowerCAmelCase__ )
arr.pop(lowerCAmelCase__ )
# merging sublist into solution list
if not solution:
solution.extend(lowerCAmelCase__ )
else:
while sublist:
lowercase = sublist.pop(0 )
for i, xx in enumerate(lowerCAmelCase__ ):
if not _operator(lowerCAmelCase__ , lowerCAmelCase__ ):
solution.insert(lowerCAmelCase__ , lowerCAmelCase__ )
break
else:
solution.append(lowerCAmelCase__ )
strand_sort(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
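    # Hedged extra check that the merge step preserves duplicates
    # (assumes the function above is exported as strand_sort, as the asserts imply).
    assert strand_sort([5, 1, 4, 2, 4, 3]) == [1, 2, 3, 4, 4, 5]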
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( lowerCAmelCase , unittest.TestCase ):
snake_case__ : str = KandinskyInpaintPipeline
snake_case__ : Optional[int] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
snake_case__ : Optional[int] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
snake_case__ : Tuple = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ : Dict = False
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return 32
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def A__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def A__ ( self ):
"""simple docstring"""
return 100
@property
def A__ ( self ):
"""simple docstring"""
lowercase = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
lowercase = MultilingualCLIP(__lowerCAmelCase )
lowercase = text_encoder.eval()
return text_encoder
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
lowercase = UNetaDConditionModel(**__lowerCAmelCase )
return model
@property
def A__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase = VQModel(**self.dummy_movq_kwargs )
return model
def A__ ( self ):
"""simple docstring"""
lowercase = self.dummy_text_encoder
lowercase = self.dummy_tokenizer
lowercase = self.dummy_unet
lowercase = self.dummy_movq
lowercase = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__lowerCAmelCase , set_alpha_to_one=__lowerCAmelCase , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__lowerCAmelCase , )
lowercase = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase=0 ):
"""simple docstring"""
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__lowerCAmelCase )
# create init_image
lowercase = floats_tensor((1, 3, 64, 64) , rng=random.Random(__lowerCAmelCase ) ).to(__lowerCAmelCase )
lowercase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase = Image.fromarray(np.uinta(__lowerCAmelCase ) ).convert("""RGB""" ).resize((256, 256) )
# create mask
lowercase = np.ones((64, 64) , dtype=np.floataa )
lowercase = 0
if str(__lowerCAmelCase ).startswith("""mps""" ):
lowercase = torch.manual_seed(__lowerCAmelCase )
else:
lowercase = torch.Generator(device=__lowerCAmelCase ).manual_seed(__lowerCAmelCase )
lowercase = {
"""prompt""": """horse""",
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def A__ ( self ):
"""simple docstring"""
lowercase = """cpu"""
lowercase = self.get_dummy_components()
lowercase = self.pipeline_class(**__lowerCAmelCase )
lowercase = pipe.to(__lowerCAmelCase )
pipe.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = pipe(**self.get_dummy_inputs(__lowerCAmelCase ) )
lowercase = output.images
lowercase = pipe(
**self.get_dummy_inputs(__lowerCAmelCase ) , return_dict=__lowerCAmelCase , )[0]
lowercase = image[0, -3:, -3:, -1]
lowercase = image_from_tuple[0, -3:, -3:, -1]
print(f'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
lowercase = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def A__ ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def A__ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
"""simple docstring"""
lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
lowercase = np.ones((768, 768) , dtype=np.floataa )
lowercase = 0
lowercase = """a hat"""
lowercase = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__lowerCAmelCase )
lowercase = KandinskyInpaintPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-inpaint""" , torch_dtype=torch.floataa )
lowercase = pipeline.to(__lowerCAmelCase )
pipeline.set_progress_bar_config(disable=__lowerCAmelCase )
lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase , lowercase = pipe_prior(
__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
lowercase = pipeline(
__lowerCAmelCase , image=__lowerCAmelCase , mask_image=__lowerCAmelCase , image_embeds=__lowerCAmelCase , negative_image_embeds=__lowerCAmelCase , generator=__lowerCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
lowercase = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowerCAmelCase , __lowerCAmelCase )
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Tuple = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Union[str, Any] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : int = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : List[str] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : List[str] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : List[Any] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Optional[Any] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Any = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : List[Any] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Union[str, Any] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Tuple = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : List[Any] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
class _A ( metaclass=lowerCAmelCase ):
snake_case__ : Optional[Any] = ['flax']
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
"""simple docstring"""
requires_backends(cls , ["""flax"""] )
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__lowerCAmelCase : Optional[Any] =logging.getLogger(__name__)
@dataclass
class _A ( lowerCAmelCase ):
snake_case__ : Optional[float] = field(
default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'Whether to SortishSamler or not.'} )
snake_case__ : bool = field(
default=lowerCAmelCase , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
snake_case__ : bool = field(default=lowerCAmelCase , metadata={'help': 'whether to use adafactor'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(default=lowerCAmelCase , metadata={'help': 'Dropout probability. Goes into model.config.'} )
snake_case__ : Optional[float] = field(
default=lowerCAmelCase , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
snake_case__ : Optional[str] = field(
default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
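
A minimal instantiation sketch; the class and keyword names below are inferred from the metadata help strings (the field identifiers above are mangled), so treat them as assumptions rather than the exported API.

# Illustrative only: names (Seq2SeqTrainingArguments, sortish_sampler,
# predict_with_generate, label_smoothing, dropout) are reconstructions, not confirmed here.
# from seq2seq_training_args import Seq2SeqTrainingArguments  # hypothetical module path
args = Seq2SeqTrainingArguments(
    output_dir="./out",
    sortish_sampler=True,
    predict_with_generate=True,
    label_smoothing=0.1,
    dropout=0.1,
)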
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def UpperCAmelCase__ ( lowerCAmelCase__ :list , lowerCAmelCase__ :list , lowerCAmelCase__ :list , lowerCAmelCase__ :list , lowerCAmelCase__ :list ) -> float:
'''simple docstring'''
lowercase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(lowerCAmelCase__ )] )
lowercase = np.array(lowerCAmelCase__ )
lowercase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , lowerCAmelCase__ ) ) , x.transpose() ) , lowerCAmelCase__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def UpperCAmelCase__ ( lowerCAmelCase__ :list , lowerCAmelCase__ :list , lowerCAmelCase__ :list ) -> float:
'''simple docstring'''
lowercase = (1, 2, 1)
lowercase = (1, 1, 0, 7)
lowercase = SARIMAX(
lowerCAmelCase__ , exog=lowerCAmelCase__ , order=lowerCAmelCase__ , seasonal_order=lowerCAmelCase__ )
lowercase = model.fit(disp=lowerCAmelCase__ , maxiter=6_0_0 , method="""nm""" )
lowercase = model_fit.predict(1 , len(lowerCAmelCase__ ) , exog=[test_match] )
return result[0]
def UpperCAmelCase__ ( lowerCAmelCase__ :list , lowerCAmelCase__ :list , lowerCAmelCase__ :list ) -> float:
'''simple docstring'''
lowercase = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase = regressor.predict(lowerCAmelCase__ )
return y_pred[0]
def UpperCAmelCase__ ( lowerCAmelCase__ :list ) -> float:
'''simple docstring'''
train_user.sort()
lowercase = np.percentile(lowerCAmelCase__ , 2_5 )
lowercase = np.percentile(lowerCAmelCase__ , 7_5 )
lowercase = qa - qa
lowercase = qa - (iqr * 0.1)
return low_lim
def UpperCAmelCase__ ( lowerCAmelCase__ :list , lowerCAmelCase__ :float ) -> bool:
'''simple docstring'''
lowercase = 0
lowercase = 0
for i in list_vote:
if i > actual_result:
lowercase = not_safe + 1
else:
if abs(abs(lowerCAmelCase__ ) - abs(lowerCAmelCase__ ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__lowerCAmelCase : str =[[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
__lowerCAmelCase : Union[str, Any] =pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
__lowerCAmelCase : Any =Normalizer().fit_transform(data_input_df.values)
# split data
__lowerCAmelCase : str =normalize_df[:, 2].tolist()
__lowerCAmelCase : int =normalize_df[:, 0].tolist()
__lowerCAmelCase : Any =normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__lowerCAmelCase : Optional[Any] =normalize_df[:, [1, 2]].tolist()
__lowerCAmelCase : str =x[: len(x) - 1]
__lowerCAmelCase : Optional[int] =x[len(x) - 1 :]
# for linear regression & sarimax
__lowerCAmelCase : Optional[Any] =total_date[: len(total_date) - 1]
__lowerCAmelCase : Optional[int] =total_user[: len(total_user) - 1]
__lowerCAmelCase : Optional[int] =total_match[: len(total_match) - 1]
__lowerCAmelCase : str =total_date[len(total_date) - 1 :]
__lowerCAmelCase : Union[str, Any] =total_user[len(total_user) - 1 :]
__lowerCAmelCase : List[Any] =total_match[len(total_match) - 1 :]
# voting system with forecasting
__lowerCAmelCase : str =[
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__lowerCAmelCase : Any ="""""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowercase = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""" )
if "img_encoder.patch_embed.proj" in name:
lowercase = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""" )
if "img_encoder.patch_embed.norm" in name:
lowercase = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""" )
if "img_encoder.layers" in name:
lowercase = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""" )
if "blocks" in name and "res" not in name:
lowercase = name.replace("""blocks""" , """layers""" )
if "attn" in name and "pre_assign" not in name:
lowercase = name.replace("""attn""" , """self_attn""" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowercase = name.replace("""proj""" , """out_proj""" )
if "pre_assign_attn.attn.proj" in name:
lowercase = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layer_norm1""" )
if "norm2" in name and "pre_assign" not in name:
lowercase = name.replace("""norm2""" , """layer_norm2""" )
if "img_encoder.norm" in name:
lowercase = name.replace("""img_encoder.norm""" , """vision_model.layernorm""" )
# text encoder
if "text_encoder.token_embedding" in name:
lowercase = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""" )
if "text_encoder.positional_embedding" in name:
lowercase = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "text_encoder.transformer.resblocks." in name:
lowercase = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""" )
if "ln_1" in name:
lowercase = name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
lowercase = name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
lowercase = name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
lowercase = name.replace("""c_proj""" , """fc2""" )
if "text_encoder" in name:
lowercase = name.replace("""text_encoder""" , """text_model""" )
if "ln_final" in name:
lowercase = name.replace("""ln_final""" , """final_layer_norm""" )
# projection layers
if "img_projector.linear_hidden." in name:
lowercase = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""" )
if "img_projector.linear_out." in name:
lowercase = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""" )
if "text_projector.linear_hidden" in name:
lowercase = name.replace("""text_projector.linear_hidden""" , """text_projection""" )
if "text_projector.linear_out" in name:
lowercase = name.replace("""text_projector.linear_out""" , """text_projection.3""" )
return name
def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Union[str, Any] ) -> List[str]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase , lowercase = int(key_split[2] ), int(key_split[4] )
lowercase = config.vision_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = config.text_config.hidden_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[
dim : dim * 2, :
]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
else:
lowercase = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowercase = val.squeeze_()
else:
lowercase = val
return orig_state_dict
def UpperCAmelCase__ ( ) -> Union[str, Any]:
'''simple docstring'''
lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def UpperCAmelCase__ ( lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :int="groupvit-gcc-yfcc" , lowerCAmelCase__ :List[Any]=False ) -> str:
'''simple docstring'''
lowercase = GroupViTConfig()
lowercase = GroupViTModel(lowerCAmelCase__ ).eval()
lowercase = torch.load(lowerCAmelCase__ , map_location="""cpu""" )["""model"""]
lowercase = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase , lowercase = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
lowercase = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
lowercase = prepare_img()
lowercase = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="""pt""" )
with torch.no_grad():
lowercase = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
lowercase = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowercase = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(f'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1e-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print("""Successfully saved processor and model to""" , lowerCAmelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
processor.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
model.push_to_hub(lowerCAmelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to dump the processor and PyTorch model."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to GroupViT checkpoint""")
parser.add_argument(
"""--model_name""",
default="""groupvit-gccy-fcc""",
type=str,
help="""Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.""",
)
__lowerCAmelCase : int =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
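
A hedged invocation example for the parser above; the script file name and checkpoint path are hypothetical, while the flags are the ones defined.

# Hypothetical invocation (script name and checkpoint path are illustrative):
# python convert_groupvit_nvlab_to_hf.py --checkpoint_path ./group_vit_gcc_yfcc_30e.pth \
#     --pytorch_dump_folder_path ./groupvit-gcc-yfcc --model_name groupvit-gcc-yfcc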
"""simple docstring"""
from math import factorial
def UpperCAmelCase__ ( lowerCAmelCase__ :int = 2_0 ) -> int:
'''simple docstring'''
lowercase = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowercase = n // 2
return int(factorial(lowerCAmelCase__ ) / (factorial(lowerCAmelCase__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(2_0))
else:
try:
__lowerCAmelCase : Optional[int] =int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
"""simple docstring"""
class _A :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
lowercase = None
lowercase = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
lowercase = len(__lowerCAmelCase )
lowercase = None
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
        if isinstance(sources, int):
            lowercase = [sources]
        if isinstance(sinks, int):
            lowercase = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
lowercase = sources[0]
lowercase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
lowercase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowercase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowercase = max_input_flow
lowercase = 0
lowercase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowercase = max_input_flow
lowercase = size - 1
def A__ ( self ):
"""simple docstring"""
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = algorithm(self )
class _A :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = flow_network
lowercase = flow_network.verticesCount
lowercase = flow_network.sourceIndex
lowercase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
lowercase = flow_network.graph
lowercase = False
def A__ ( self ):
"""simple docstring"""
if not self.executed:
self._algorithm()
lowercase = True
def A__ ( self ):
"""simple docstring"""
pass
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
# use this to save your result
lowercase = -1
def A__ ( self ):
"""simple docstring"""
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
super().__init__(__lowerCAmelCase )
lowercase = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowercase = [0] * self.verticies_count
lowercase = [0] * self.verticies_count
def A__ ( self ):
"""simple docstring"""
lowercase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
lowercase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
lowercase = 0
while i < len(__lowerCAmelCase ):
lowercase = vertices_list[i]
lowercase = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
lowercase = 0
else:
i += 1
lowercase = sum(self.preflow[self.source_index] )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
lowercase = self.heights[to_index]
if min_height is not None:
lowercase = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : int =[0]
__lowerCAmelCase : List[Any] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : Optional[int] =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__lowerCAmelCase : Tuple =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Optional[int] =flow_network.find_maximum_flow()
print(F"""maximum flow is {maximum_flow}""")