import unittest

from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]

        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
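

# Hedged usage sketch, not part of the original test file: the integration
# test above targets the released "AI-Sweden/gpt-sw3-126m" checkpoint, so the
# natural entry point is loading the tokenizer by that name (network needed).
if __name__ == "__main__":
    tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    print(tokenizer.tokenize("Det är inget fel på Mr. Cool"))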
def all_unique_characters(input_str: str) -> bool:
    """
    Use a big-integer bitmap to check whether every character in ``input_str``
    occurs at most once.

    >>> all_unique_characters("abcdef")
    True
    >>> all_unique_characters("hello")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
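

# For comparison, a hedged set-based equivalent of the bitmap check above:
# both are O(n) over the input, but the bitmap version replaces the hash set
# with bit operations on one arbitrarily wide integer.
def all_unique_characters_set(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)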
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/electra-small-generator': 512,
    'google/electra-base-generator': 512,
    'google/electra-large-generator': 512,
    'google/electra-small-discriminator': 512,
    'google/electra-base-discriminator': 512,
    'google/electra-large-discriminator': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
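

# Hedged usage sketch (not in the source module): the fast tokenizer is
# normally loaded from a released checkpoint; requires network access.
if __name__ == "__main__":
    tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
    print(tokenizer("ELECTRA uses replaced token detection.")["input_ids"])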
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it into train/test sets.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph: list, s: int, t: int, parent: list) -> bool:
    # Return True if there is a path from source s to sink t in the residual
    # graph; fill parent[] along the way so the path can be recovered.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if not visited[ind] and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph: list, source: int, sink: int) -> list:
    # Augment along BFS paths (Edmonds-Karp); afterwards, report every edge
    # that was saturated: residual capacity 0 but positive original capacity.
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
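

# Companion sketch (not in the original file): the same Edmonds-Karp loop as
# `mincut`, but returning the value of the maximum flow instead of the list of
# saturated edges. It reuses `bfs` above and, like `mincut`, mutates the graph
# it is given.
def max_flow_value(graph: list, source: int, sink: int) -> int:
    parent = [-1] * len(graph)
    total = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        total += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return total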
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
import unittest

from .lib import (
    Matrix,
    Vector,
    axpy,
    square_zero_matrix,
    unit_basis_vector,
    zero_vector,
)


class LinearAlgebraTestCase(unittest.TestCase):
    def test_component(self):
        """test for method component()"""
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        """test for method __str__()"""
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        """test for method __len__()"""
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        """test for method euclidean_length()"""
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        """test for + operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        """test for - operator"""
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        """test for * operator"""
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        """test for global function zero_vector()"""
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        """test for global function unit_basis_vector()"""
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        """test for global function axpy() (operation)"""
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        """test for method copy()"""
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        """test for method change_component()"""
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        """test for Matrix method __str__()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        """test for Matrix method minor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        """test for Matrix method cofactor()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        """test for Matrix method determinant()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        """test for Matrix * operator"""
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        """test for Matrix method change_component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        """test for Matrix method component()"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1))

    def test_add_matrix(self):
        """test for Matrix + operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        """test for Matrix - operator"""
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        """test for global function square_zero_matrix()"""
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )


if __name__ == "__main__":
    unittest.main()
def topological_sort(graph: dict) -> None:
    """Kahn's algorithm: repeatedly remove vertices whose indegree is 0."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
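
# Extra example: on a graph containing a cycle, no vertex on the cycle ever
# reaches indegree 0, so fewer than len(graph) vertices are dequeued and the
# cycle branch fires.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"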
from typing import Tuple, Union

from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig


if is_timm_available():
    import timm

if is_torch_available():
    from torch import Tensor


class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )

        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
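

# Hedged usage sketch, not part of the source file: build the backbone
# directly from a config. "resnet18" is only an illustrative timm model name,
# and the keyword arguments assume TimmBackboneConfig exposes the same fields
# that __init__ reads above. Requires torch and timm.
if __name__ == "__main__":
    import torch

    config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
    model = TimmBackbone(config)
    outputs = model(torch.zeros(1, 3, 224, 224))
    print([feature_map.shape for feature_map in outputs.feature_maps])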
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
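

# Minimal hedged sketch (not from the source) of how a concrete command plugs
# into this interface. `HelloCommand` is hypothetical; in the real CLI the
# `parser` passed to `register_subcommand` is the subparsers action returned
# by `ArgumentParser.add_subparsers()`, which is why `add_parser` is available.
class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        hello_parser = parser.add_parser("hello", help="Toy command that greets the user")
        hello_parser.add_argument("--name", type=str, default="world")
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name: str):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")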
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
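

# Hedged usage sketch following the library's usual configuration pattern:
# instantiate a config (optionally overriding defaults) and read fields back.
# A model would then be built from it in the standard way, e.g. MraModel(config).
if __name__ == "__main__":
    config = MraConfig(num_hidden_layers=6)
    print(config.model_type, config.num_hidden_layers, config.hidden_size)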
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """
    The Chudnovsky algorithm is a fast method for calculating the digits of
    pi, based on Ramanujan's pi formulae.

    >>> pi(10)
    '3.14159265'
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
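

# For reference, the series implemented above is Chudnovsky's:
#
#   1/pi = 12 * sum_{k>=0} (-1)^k * (6k)! * (13591409 + 545140134*k)
#                           / ((3k)! * (k!)**3 * 640320**(3*k + 3/2))
#
# The constants are pre-folded: 426880 * sqrt(10005) equals 640320**1.5 / 12,
# and each loop iteration multiplies the exponential term by -640320**3,
# which is -262537412640768000.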
if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
import unittest

from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin


if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>

    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>

    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
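

# Hedged usage sketch (not in the source module): the processor is normally
# loaded from a checkpoint and called on an (image, text) pair;
# "dandelin/vilt-b32-finetuned-vqa" is the usual example checkpoint in the
# library docs. Requires network access.
if __name__ == "__main__":
    import requests
    from PIL import Image

    image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw)
    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    encoding = processor(image, "How many cats are there?", return_tensors="pt")
    print(sorted(encoding.keys()))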
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """
    Mobius function: 1 if n is square-free with an even number of prime
    factors, -1 if square-free with an odd number, and 0 otherwise.

    >>> mobius(6)
    1
    >>> mobius(4)
    0
    >>> mobius(7)
    -1
    """
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
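

# Quick illustration (assuming the helper modules above are importable): the
# first ten values reproduce the classic sequence 1, -1, -1, 0, -1, 1, -1, 0, 0, 1.
def print_mobius_values(limit: int = 10) -> None:
    print([mobius(n) for n in range(1, limit + 1)])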
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")

                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 28
| 1
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( lowercase__ : Optional[int] ) -> List[Any]:
    '''Derive a SwinaSRConfig from the checkpoint URL, whose filename encodes the task variant.'''
lowerCAmelCase_ : List[Any] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
lowerCAmelCase_ : Any = 4
lowerCAmelCase_ : Tuple = 48
lowerCAmelCase_ : Any = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase_ : Any = [6, 6, 6, 6]
lowerCAmelCase_ : Tuple = 60
lowerCAmelCase_ : int = [6, 6, 6, 6]
lowerCAmelCase_ : List[Any] = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase_ : Optional[Any] = 4
lowerCAmelCase_ : Optional[Any] = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
lowerCAmelCase_ : Tuple = 1
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : List[Any] = 126
lowerCAmelCase_ : Optional[Any] = 7
lowerCAmelCase_ : List[Any] = 255.0
lowerCAmelCase_ : Any = """"""
return config
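# Note: the checkpoint filename encodes the task (classical, compressed, lightweight or
# real-world super-resolution, or JPEG artifact reduction) plus the upscale factor,
# which is why the config above is derived entirely from the URL.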
def __UpperCamelCase ( lowercase__ : List[Any] , lowercase__ : Dict ) -> List[Any]:
    '''Rename one key of the original Swin2SR state dict to the Transformers naming scheme.'''
if "patch_embed.proj" in name and "layers" not in name:
lowerCAmelCase_ : Tuple = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCAmelCase_ : List[str] = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
lowerCAmelCase_ : Tuple = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
lowerCAmelCase_ : Tuple = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
lowerCAmelCase_ : List[Any] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowerCAmelCase_ : List[str] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCAmelCase_ : Dict = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCAmelCase_ : Union[str, Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCAmelCase_ : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCAmelCase_ : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
lowerCAmelCase_ : List[str] = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
lowerCAmelCase_ : Tuple = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
lowerCAmelCase_ : Tuple = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
lowerCAmelCase_ : Optional[Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
lowerCAmelCase_ : Any = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
lowerCAmelCase_ : Optional[Any] = """layernorm.weight"""
if name == "norm.bias":
lowerCAmelCase_ : str = """layernorm.bias"""
if "conv_first" in name:
lowerCAmelCase_ : str = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
lowerCAmelCase_ : Optional[int] = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
lowerCAmelCase_ : int = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
lowerCAmelCase_ : Dict = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
lowerCAmelCase_ : Any = name.replace("""upsample.2""" , """upsample.convolution_1""" )
lowerCAmelCase_ : Optional[Any] = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
lowerCAmelCase_ : Optional[Any] = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
lowerCAmelCase_ : Any = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
lowerCAmelCase_ : Dict = """swin2sr.""" + name
return name
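# Intended mapping (assuming each replacement above is chained back into `name`,
# as in the upstream conversion script), e.g.:
#   "layers.0.residual_group.blocks.1.attn.proj.weight"
#       -> "swin2sr.encoder.stages.0.layers.1.attention.output.dense.weight"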
def __UpperCamelCase ( lowercase__ : Dict , lowercase__ : str ) -> int:
    '''Convert the original state dict: split fused qkv projections and rename every key.'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ : Tuple = orig_state_dict.pop(lowercase__ )
if "qkv" in key:
lowerCAmelCase_ : Tuple = key.split(""".""" )
lowerCAmelCase_ : str = int(key_split[1] )
lowerCAmelCase_ : Tuple = int(key_split[4] )
lowerCAmelCase_ : str = config.embed_dim
if "weight" in key:
lowerCAmelCase_ : Union[str, Any] = val[:dim, :]
lowerCAmelCase_ : List[Any] = val[dim : dim * 2, :]
lowerCAmelCase_ : int = val[-dim:, :]
else:
lowerCAmelCase_ : List[Any] = val[:dim]
lowerCAmelCase_ : Optional[int] = val[dim : dim * 2]
lowerCAmelCase_ : List[Any] = val[-dim:]
else:
lowerCAmelCase_ : Tuple = val
return orig_state_dict
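# The fused qkv matrix has shape (3 * dim, dim); the row blocks sliced above are
# the query, key and value projections, in that order (biases split the same way).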
def __UpperCamelCase ( lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> str:
    '''Convert a Swin2SR checkpoint, verify its outputs on a test image, and optionally save/push it.'''
lowerCAmelCase_ : List[str] = get_config(lowercase__ )
lowerCAmelCase_ : Any = SwinaSRForImageSuperResolution(lowercase__ )
model.eval()
lowerCAmelCase_ : List[str] = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )
lowerCAmelCase_ : List[Any] = convert_state_dict(lowercase__ , lowercase__ )
lowerCAmelCase_ , lowerCAmelCase_ : str = model.load_state_dict(lowercase__ , strict=lowercase__ )
if len(lowercase__ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(lowercase__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'Unexpected key {key} in state_dict' )
# verify values
lowerCAmelCase_ : Tuple = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
lowerCAmelCase_ : Optional[int] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert("""RGB""" )
lowerCAmelCase_ : Dict = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
lowerCAmelCase_ : Any = 126 if """Jpeg""" in checkpoint_url else 256
lowerCAmelCase_ : Optional[Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowerCAmelCase_ : int = transforms(lowercase__ ).unsqueeze(0 )
if config.num_channels == 1:
lowerCAmelCase_ : int = pixel_values[:, 0, :, :].unsqueeze(1 )
lowerCAmelCase_ : int = model(lowercase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = torch.Size([1, 3, 512, 512] )
lowerCAmelCase_ : Optional[Any] = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCAmelCase_ : Optional[Any] = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase_ : List[Any] = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCAmelCase_ : List[str] = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase_ : str = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCAmelCase_ : Tuple = torch.Size([1, 3, 512, 512] )
lowerCAmelCase_ : List[Any] = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCAmelCase_ : List[str] = torch.Size([1, 3, 1024, 1024] )
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase__ , atol=1E-3 )
print("""Looks ok!""" )
lowerCAmelCase_ : Tuple = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
lowerCAmelCase_ : Dict = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowercase__ )
if push_to_hub:
model.push_to_hub(f'caidas/{model_name}' )
processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
__UpperCAmelCase = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 28
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput ( __UpperCamelCase ):
__snake_case : torch.FloatTensor
__snake_case : torch.FloatTensor
__snake_case : Optional[torch.FloatTensor] = None
class __a ( __UpperCamelCase ,__UpperCamelCase ):
__snake_case : Optional[Any] = 2
@register_to_config
def __init__( self : str , UpperCAmelCase : float = 0.02 , UpperCAmelCase : float = 1_00 , UpperCAmelCase : float = 1.007 , UpperCAmelCase : float = 80 , UpperCAmelCase : float = 0.05 , UpperCAmelCase : float = 50 , ):
# standard deviation of the initial noise distribution
lowerCAmelCase_ : List[Any] = sigma_max
# setable values
lowerCAmelCase_ : int = None
lowerCAmelCase_ : np.IntTensor = None
lowerCAmelCase_ : torch.FloatTensor = None # sigma(t_i)
def A ( self : Any , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[int] = None ):
return sample
def A ( self : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, torch.device] = None ):
lowerCAmelCase_ : Dict = num_inference_steps
lowerCAmelCase_ : Dict = np.arange(0 , self.num_inference_steps )[::-1].copy()
lowerCAmelCase_ : str = torch.from_numpy(UpperCAmelCase ).to(UpperCAmelCase )
lowerCAmelCase_ : List[str] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in self.timesteps
]
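        # The schedule above interpolates geometrically in sigma**2: its first entry
        # is sigma_min**2 and its last is sigma_max**2 (the stored values are squared
        # sigmas, matching the upstream diffusers KarrasVeScheduler).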
lowerCAmelCase_ : Dict = torch.tensor(UpperCAmelCase , dtype=torch.floataa , device=UpperCAmelCase )
def A ( self : str , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : float , UpperCAmelCase : Optional[torch.Generator] = None ):
if self.config.s_min <= sigma <= self.config.s_max:
lowerCAmelCase_ : List[str] = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
else:
lowerCAmelCase_ : List[str] = 0
# sample eps ~ N(0, S_noise^2 * I)
lowerCAmelCase_ : Any = self.config.s_noise * randn_tensor(sample.shape , generator=UpperCAmelCase ).to(sample.device )
lowerCAmelCase_ : int = sigma + gamma * sigma
lowerCAmelCase_ : List[Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
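    # The method above implements the stochastic "churn" of Karras et al. (2022):
    # sigma is raised to sigma_hat = sigma * (1 + gamma) and Gaussian noise with
    # variance sigma_hat**2 - sigma**2 is added so the sample matches that noise level.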
def A ( self : Optional[int] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : bool = True , ):
lowerCAmelCase_ : List[str] = sample_hat + sigma_hat * model_output
lowerCAmelCase_ : Optional[Any] = (sample_hat - pred_original_sample) / sigma_hat
lowerCAmelCase_ : Tuple = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCAmelCase , derivative=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : bool = True , ):
lowerCAmelCase_ : Any = sample_prev + sigma_prev * model_output
lowerCAmelCase_ : Optional[int] = (sample_prev - pred_original_sample) / sigma_prev
lowerCAmelCase_ : str = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative)
return KarrasVeOutput(
prev_sample=UpperCAmelCase , derivative=UpperCAmelCase , pred_original_sample=UpperCAmelCase )
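    # The correction step above is Heun's second-order update: it advances from
    # sigma_hat to sigma_prev using the averaged slope
    # 0.5 * derivative + 0.5 * derivative_corr.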
def A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : List[str] ):
raise NotImplementedError()
| 28
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'sentencepiece.bpe.model'}
__UpperCAmelCase = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
}
}
__UpperCAmelCase = {
'camembert-base': 5_12,
}
__UpperCAmelCase = '▁'
class __a ( __UpperCamelCase ):
__snake_case : Tuple = VOCAB_FILES_NAMES
__snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[str] = ["""input_ids""", """attention_mask"""]
def __init__( self : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str]="<s>" , UpperCAmelCase : int="</s>" , UpperCAmelCase : List[Any]="</s>" , UpperCAmelCase : Union[str, Any]="<s>" , UpperCAmelCase : int="<unk>" , UpperCAmelCase : Any="<pad>" , UpperCAmelCase : Optional[Any]="<mask>" , UpperCAmelCase : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : str , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase_ : Optional[int] = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
lowerCAmelCase_ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase , )
lowerCAmelCase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCAmelCase ) )
lowerCAmelCase_ : Union[str, Any] = vocab_file
        # HACK: these tokens were added by fairseq but don't seem to be actually used
        # when duplicated in the actual sentencepiece vocabulary (this is the case
        # for <s> and </s>).
lowerCAmelCase_ : int = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
lowerCAmelCase_ : int = len(self.fairseq_tokens_to_ids )
lowerCAmelCase_ : str = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
lowerCAmelCase_ : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def A ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase_ : List[Any] = [self.cls_token_id]
lowerCAmelCase_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
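    # Resulting layout (RoBERTa-style): `<s> A </s>` for a single sequence and
    # `<s> A </s></s> B </s>` for a pair.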
def A ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
lowerCAmelCase_ : Optional[Any] = [self.sep_token_id]
lowerCAmelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
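    # Like RoBERTa, CamemBERT does not use token type ids: the list returned above
    # is all zeros for both single sequences and pairs.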
@property
def A ( self : int ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : str = {self.convert_ids_to_tokens(UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A ( self : Any , UpperCAmelCase : str ):
return self.sp_model.encode(UpperCAmelCase , out_type=UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : Optional[Any] ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(UpperCAmelCase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A ( self : Optional[int] , UpperCAmelCase : int ):
lowerCAmelCase_ : Dict = []
lowerCAmelCase_ : Union[str, Any] = """"""
lowerCAmelCase_ : List[Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase ) + token
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : List[Any] = []
else:
current_sub_tokens.append(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = False
out_string += self.sp_model.decode(UpperCAmelCase )
return out_string.strip()
def __getstate__( self : Any ):
lowerCAmelCase_ : int = self.__dict__.copy()
lowerCAmelCase_ : int = None
return state
def __setstate__( self : str , UpperCAmelCase : List[Any] ):
lowerCAmelCase_ : str = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowerCAmelCase_ : List[Any] = {}
lowerCAmelCase_ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def A ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase_ : str = os.path.join(
UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase , """wb""" ) as fi:
lowerCAmelCase_ : Tuple = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase )
return (out_vocab_file,)
| 28
|
from __future__ import annotations
from typing import Any
class __a :
def __init__( self : Dict , UpperCAmelCase : int = 6 ):
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
self.create_linked_list(UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : int ):
lowerCAmelCase_ : Any = Node()
lowerCAmelCase_ : int = current_node
lowerCAmelCase_ : str = current_node
lowerCAmelCase_ : Union[str, Any] = current_node
for _ in range(1 , UpperCAmelCase ):
lowerCAmelCase_ : Any = Node()
lowerCAmelCase_ : Dict = current_node
lowerCAmelCase_ : Optional[int] = previous_node
lowerCAmelCase_ : Optional[Any] = current_node
lowerCAmelCase_ : List[str] = self.front
lowerCAmelCase_ : Optional[int] = previous_node
def A ( self : Any ):
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def A ( self : List[str] ):
self.check_can_perform_operation()
return self.front.data if self.front else None
def A ( self : Optional[int] , UpperCAmelCase : Any ):
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowerCAmelCase_ : int = self.rear.next
if self.rear:
lowerCAmelCase_ : Union[str, Any] = data
def A ( self : List[Any] ):
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowerCAmelCase_ : int = self.front.data
lowerCAmelCase_ : Optional[Any] = None
return data
lowerCAmelCase_ : Optional[int] = self.front
lowerCAmelCase_ : Any = old_front.next
lowerCAmelCase_ : Tuple = old_front.data
lowerCAmelCase_ : str = None
return data
def A ( self : Tuple ):
if self.is_empty():
raise Exception("""Empty Queue""" )
def A ( self : List[str] ):
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class Node :
def __init__( self : Any ):
lowerCAmelCase_ : Any | None = None
lowerCAmelCase_ : Node | None = None
lowerCAmelCase_ : Node | None = None
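# Node holds a data slot plus next/prev pointers; create_linked_list above wires a
# fixed number of Nodes into the circular doubly linked list backing the queue.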
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
| 1
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester :
def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[str]=13 , UpperCAmelCase : str=32 , UpperCAmelCase : str=3 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] , UpperCAmelCase : Any=[2, 2, 3, 2] , UpperCAmelCase : Tuple=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Tuple=37 , UpperCAmelCase : Union[str, Any]="gelu" , UpperCAmelCase : Optional[Any]=10 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : List[str]=["stage2", "stage3", "stage4"] , UpperCAmelCase : Union[str, Any]=[2, 3, 4] , UpperCAmelCase : Dict=None , ):
lowerCAmelCase_ : str = parent
lowerCAmelCase_ : Dict = batch_size
lowerCAmelCase_ : Optional[Any] = image_size
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : int = num_stages
lowerCAmelCase_ : int = hidden_sizes
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : int = use_labels
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : Tuple = num_labels
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : str = out_features
lowerCAmelCase_ : Dict = out_indices
lowerCAmelCase_ : Optional[int] = scope
def A ( self : str ):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def A ( self : List[str] ):
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A ( self : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : str = ConvNextModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : Dict = ConvNextForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Any = ConvNextBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Dict = ConvNextBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : int = config_and_inputs
lowerCAmelCase_ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : int = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__snake_case : Any = (
{"""feature-extraction""": ConvNextModel, """image-classification""": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__snake_case : Optional[Any] = True
__snake_case : Optional[int] = False
__snake_case : Dict = False
__snake_case : List[Any] = False
__snake_case : Dict = False
def A ( self : str ):
lowerCAmelCase_ : List[str] = ConvNextModelTester(self )
lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A ( self : str ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : List[str] ):
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def A ( self : Union[str, Any] ):
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def A ( self : Optional[int] ):
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def A ( self : List[Any] ):
pass
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Tuple = model_class(UpperCAmelCase )
lowerCAmelCase_ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : int = [*signature.parameters.keys()]
lowerCAmelCase_ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A ( self : List[Any] ):
def check_hidden_states_output(UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict ):
lowerCAmelCase_ : Dict = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCAmelCase_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : Tuple = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def A ( self : Union[str, Any] ):
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = ConvNextModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __UpperCamelCase ( ) -> Tuple:
    '''Load the COCO test image used to verify the converted model.'''
lowerCAmelCase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def A ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Any = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(UpperCAmelCase )
lowerCAmelCase_ : str = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Dict = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Optional[Any] = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Optional[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Tuple = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
@require_torch
class __a ( unittest.TestCase ,__UpperCamelCase ):
__snake_case : Any = (ConvNextBackbone,) if is_torch_available() else ()
__snake_case : Any = ConvNextConfig
__snake_case : Union[str, Any] = False
def A ( self : int ):
lowerCAmelCase_ : List[Any] = ConvNextModelTester(self )
| 28
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : Tuple="attention" ) -> Dict:
    '''Return the (k, o, q, v) attention kernels for block i from a T5X parameter dict.'''
lowerCAmelCase_ : Any = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
lowerCAmelCase_ : Optional[Any] = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
lowerCAmelCase_ : str = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
lowerCAmelCase_ : Tuple = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
return k, o, q, v
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Dict , lowercase__ : List[str] , lowercase__ : str=False ) -> int:
    '''Return the MLP kernels (wi, wo) for block i; wi is a pair for gated-GeLU (v1.1) models.'''
if split_mlp_wi:
lowerCAmelCase_ : List[Any] = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
lowerCAmelCase_ : List[Any] = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
lowerCAmelCase_ : int = (wi_a, wi_a)
else:
lowerCAmelCase_ : str = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
lowerCAmelCase_ : int = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
return wi, wo
def __UpperCamelCase ( lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : Tuple ) -> int:
    '''Return the layer norm scale for the given block and layer.'''
return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def __UpperCamelCase ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> Optional[int]:
    '''Convert the T5X variables dict into a flat, PyTorch-style parameter mapping.'''
lowerCAmelCase_ : List[str] = traverse_util.flatten_dict(variables["""target"""] )
lowerCAmelCase_ : List[Any] = {"""/""".join(lowercase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCAmelCase_ : Dict = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowercase__ )
lowerCAmelCase_ : Optional[Any] = collections.OrderedDict()
# Shared embeddings.
lowerCAmelCase_ : Tuple = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" )
lowerCAmelCase_ : Optional[int] = layer_norm
lowerCAmelCase_ : Optional[int] = k.T
lowerCAmelCase_ : List[Any] = o.T
lowerCAmelCase_ : Union[str, Any] = q.T
lowerCAmelCase_ : Any = v.T
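        # T5X stores dense kernels as (in_features, out_features); the transposes
        # above convert them to PyTorch's (out_features, in_features) Linear layout.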
# Block i, layer 1 (MLP).
lowerCAmelCase_ : Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ )
lowerCAmelCase_ : str = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : Optional[int] = wi[0].T
lowerCAmelCase_ : Optional[Any] = wi[1].T
else:
lowerCAmelCase_ : int = wi.T
lowerCAmelCase_ : Optional[Any] = wo.T
lowerCAmelCase_ : Tuple = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowerCAmelCase_ : str = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ : int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" )
lowerCAmelCase_ : Dict = layer_norm
lowerCAmelCase_ : Union[str, Any] = k.T
lowerCAmelCase_ : Union[str, Any] = o.T
lowerCAmelCase_ : Any = q.T
lowerCAmelCase_ : Tuple = v.T
# Block i, layer 1 (Cross Attention).
lowerCAmelCase_ : Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" )
lowerCAmelCase_ : Optional[int] = layer_norm
lowerCAmelCase_ : Any = k.T
lowerCAmelCase_ : Any = o.T
lowerCAmelCase_ : Optional[int] = q.T
lowerCAmelCase_ : Dict = v.T
# Block i, layer 2 (MLP).
lowerCAmelCase_ : List[str] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ : int = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ )
lowerCAmelCase_ : Any = layer_norm
if split_mlp_wi:
lowerCAmelCase_ : List[str] = wi[0].T
lowerCAmelCase_ : List[Any] = wi[1].T
else:
lowerCAmelCase_ : Optional[Any] = wi.T
lowerCAmelCase_ : str = wo.T
lowerCAmelCase_ : int = old["""decoder/decoder_norm/scale"""]
lowerCAmelCase_ : Union[str, Any] = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCAmelCase_ : Optional[Any] = old["""decoder/logits_dense/kernel"""].T
return new
def __UpperCamelCase ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Any:
    '''Build a torch state dict from the converted params, filling in shared embeddings and lm_head.'''
lowerCAmelCase_ : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : List[Any] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ : Union[str, Any] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCAmelCase_ : List[str] = state_dict["""shared.weight"""]
return state_dict
def __UpperCamelCase ( lowercase__ : Dict , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : List[str] ) -> Tuple:
    '''Load the T5X checkpoint at the given path into the Transformers model.'''
lowerCAmelCase_ : Tuple = checkpoints.load_tax_checkpoint(lowercase__ )
lowerCAmelCase_ : List[str] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ )
lowerCAmelCase_ : List[str] = make_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ , strict=lowercase__ )
def __UpperCamelCase ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : List[Any] , lowercase__ : bool = False ) -> int:
    '''Instantiate the model from the config file, load the T5X weights, and save a PyTorch checkpoint.'''
lowerCAmelCase_ : Any = TaConfig.from_json_file(lowercase__ )
print(f'Building PyTorch model from configuration: {config}' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCAmelCase_ : Optional[int] = TaEncoderModel(lowercase__ )
else:
lowerCAmelCase_ : Dict = TaForConditionalGeneration(lowercase__ )
    # Load weights from the T5X checkpoint
load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowercase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase__ )
print("""Done""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
    '--is_encoder_only', action='store_true', help='Set if the model is an encoder-only model.', default=False
)
__UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 28
| 1
|
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class __a ( __UpperCamelCase ):
__snake_case : Any = """marian"""
__snake_case : List[Any] = ["""past_key_values"""]
__snake_case : int = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : int , UpperCAmelCase : Dict=5_81_01 , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Any=10_24 , UpperCAmelCase : int=12 , UpperCAmelCase : Tuple=40_96 , UpperCAmelCase : Tuple=16 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Any=40_96 , UpperCAmelCase : List[str]=16 , UpperCAmelCase : Union[str, Any]=0.0 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : Any="gelu" , UpperCAmelCase : Any=10_24 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : int=0.0 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Union[str, Any]=0.02 , UpperCAmelCase : List[str]=5_81_00 , UpperCAmelCase : str=False , UpperCAmelCase : Optional[int]=5_81_00 , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : str=True , **UpperCAmelCase : List[str] , ):
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Optional[int] = decoder_vocab_size or vocab_size
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : Any = d_model
lowerCAmelCase_ : Optional[int] = encoder_ffn_dim
lowerCAmelCase_ : List[str] = encoder_layers
lowerCAmelCase_ : Optional[int] = encoder_attention_heads
lowerCAmelCase_ : Any = decoder_ffn_dim
lowerCAmelCase_ : int = decoder_layers
lowerCAmelCase_ : Tuple = decoder_attention_heads
lowerCAmelCase_ : List[Any] = dropout
lowerCAmelCase_ : Optional[Any] = attention_dropout
lowerCAmelCase_ : str = activation_dropout
lowerCAmelCase_ : str = activation_function
lowerCAmelCase_ : Optional[Any] = init_std
lowerCAmelCase_ : Optional[Any] = encoder_layerdrop
lowerCAmelCase_ : Optional[int] = decoder_layerdrop
lowerCAmelCase_ : int = use_cache
lowerCAmelCase_ : Union[str, Any] = encoder_layers
lowerCAmelCase_ : int = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCAmelCase_ : str = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , forced_eos_token_id=UpperCAmelCase , **UpperCAmelCase , )
class __a ( __UpperCamelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def A ( self : str ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : int = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCAmelCase_ : Union[str, Any] = {0: """batch"""}
lowerCAmelCase_ : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
lowerCAmelCase_ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
lowerCAmelCase_ : Dict = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction="""inputs""" )
elif self.task == "causal-lm":
# TODO: figure this case out.
lowerCAmelCase_ : Optional[int] = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
] )
if self.use_past:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.num_layers
for i in range(UpperCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase_ : int = {0: """batch""", 2: """past_sequence + sequence"""}
else:
lowerCAmelCase_ : Any = OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}),
("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}),
("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}),
("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def A ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : str = super().outputs
else:
lowerCAmelCase_ : Tuple = super(UpperCAmelCase , self ).outputs
if self.use_past:
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.num_layers
for i in range(UpperCAmelCase ):
lowerCAmelCase_ : List[str] = {0: """batch""", 2: """past_sequence + sequence"""}
lowerCAmelCase_ : Any = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def A ( self : Optional[int] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase_ : Dict = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Generate decoder inputs
lowerCAmelCase_ : str = seq_length if not self.use_past else 1
lowerCAmelCase_ : Tuple = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
lowerCAmelCase_ : str = dict(**UpperCAmelCase , **UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = common_inputs["""input_ids"""].shape
lowerCAmelCase_ : Dict = common_inputs["""decoder_input_ids"""].shape[1]
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.num_attention_heads
lowerCAmelCase_ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : Any = decoder_seq_length + 3
lowerCAmelCase_ : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
lowerCAmelCase_ : int = torch.cat(
[common_inputs["""decoder_attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase )] , dim=1 )
lowerCAmelCase_ : str = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.num_layers
lowerCAmelCase_ : Optional[Any] = min(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[Any] = max(UpperCAmelCase , UpperCAmelCase ) - min_num_layers
lowerCAmelCase_ : str = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(UpperCAmelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
torch.zeros(UpperCAmelCase ),
) )
# TODO: test this.
lowerCAmelCase_ : Tuple = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(UpperCAmelCase , UpperCAmelCase ):
common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) )
return common_inputs
def A ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase_ : str = self._generate_dummy_inputs_for_encoder_and_decoder(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : Union[str, Any] = seqlen + 2
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.num_layers
lowerCAmelCase_ , lowerCAmelCase_ : int = self.num_attention_heads
lowerCAmelCase_ : Dict = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
lowerCAmelCase_ : int = common_inputs["""attention_mask"""].dtype
lowerCAmelCase_ : List[str] = torch.cat(
[common_inputs["""attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
lowerCAmelCase_ : Any = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(UpperCAmelCase )
]
return common_inputs
def A ( self : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowerCAmelCase_ : List[str] = compute_effective_axis_dimension(
UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowerCAmelCase_ : List[Any] = tokenizer.num_special_tokens_to_add(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = compute_effective_axis_dimension(
UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
lowerCAmelCase_ : str = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
lowerCAmelCase_ : Any = dict(tokenizer(UpperCAmelCase , return_tensors=UpperCAmelCase ) )
return common_inputs
def A ( self : str , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
else:
lowerCAmelCase_ : Optional[int] = self._generate_dummy_inputs_for_causal_lm(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
return common_inputs
def A ( self : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
lowerCAmelCase_ : Tuple = super()._flatten_past_key_values_(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
else:
lowerCAmelCase_ : Union[str, Any] = super(UpperCAmelCase , self )._flatten_past_key_values_(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@property
def A ( self : List[str] ):
return 1e-4
| 28
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : str=False ) -> List[Any]:
    '''Create the list of (old, new) key pairs mapping timm ViT weights to Transformers names.'''
lowerCAmelCase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase_ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
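# For example, with i = 0 the loop above yields the pair
#   ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight"),
# i.e. each timm parameter name is mapped onto its HF ViT counterpart.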
def __UpperCamelCase ( lowercase__ : int , lowercase__ : Dict , lowercase__ : Optional[Any]=False ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase_ : int = """"""
else:
lowerCAmelCase_ : Union[str, Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : str = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase_ : Any = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase_ : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase_ : Dict = in_proj_bias[-config.hidden_size :]
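# Minimal sketch (illustrative helper, not used by the conversion itself) of the
# qkv split performed above: for hidden size H, timm stores a fused (3H, H)
# projection, and consecutive H-row slices are the query, key and value weights.
def _demo_qkv_split(hidden_size: int = 4) -> None:
    fused = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float32).reshape(3 * hidden_size, hidden_size)
    query = fused[:hidden_size, :]
    key = fused[hidden_size : hidden_size * 2, :]
    value = fused[-hidden_size:, :]
    # stacking the three slices back together recovers the fused projection
    assert torch.equal(torch.cat([query, key, value]), fused)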
def __UpperCamelCase ( lowercase__ : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = dct.pop(lowercase__ )
lowerCAmelCase_ : List[Any] = val
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : List[str] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Any=True ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowerCAmelCase_ : Dict = 8
# set labels if required
if not base_model:
lowerCAmelCase_ : str = 1000
lowerCAmelCase_ : List[Any] = """huggingface/label-files"""
lowerCAmelCase_ : Optional[int] = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : List[str] = {int(lowercase__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Any = idalabel
lowerCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowerCAmelCase_ : Union[str, Any] = 384
lowerCAmelCase_ : Any = 1536
lowerCAmelCase_ : Union[str, Any] = 12
lowerCAmelCase_ : str = 6
# load original model from torch hub
lowerCAmelCase_ : Any = torch.hub.load("""facebookresearch/dino:main""" , lowercase__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase_ : Any = original_model.state_dict()
if base_model:
remove_classification_head_(lowercase__ )
lowerCAmelCase_ : Dict = create_rename_keys(lowercase__ , base_model=lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
if base_model:
lowerCAmelCase_ : int = ViTModel(lowercase__ , add_pooling_layer=lowercase__ ).eval()
else:
lowerCAmelCase_ : Union[str, Any] = ViTForImageClassification(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by ViTImageProcessor
lowerCAmelCase_ : List[str] = ViTImageProcessor()
lowerCAmelCase_ : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase_ : List[str] = encoding["""pixel_values"""]
lowerCAmelCase_ : Optional[int] = model(lowercase__ )
if base_model:
lowerCAmelCase_ : Union[str, Any] = original_model(lowercase__ )
assert torch.allclose(lowercase__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
lowerCAmelCase_ : int = original_model(lowercase__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 28
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase = {
'configuration_blip': [
'BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BlipConfig',
'BlipTextConfig',
'BlipVisionConfig',
],
'processing_blip': ['BlipProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['BlipImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'BlipModel',
'BlipPreTrainedModel',
'BlipForConditionalGeneration',
'BlipForQuestionAnswering',
'BlipVisionModel',
'BlipTextModel',
'BlipForImageTextRetrieval',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBlipModel',
'TFBlipPreTrainedModel',
'TFBlipForConditionalGeneration',
'TFBlipForQuestionAnswering',
'TFBlipVisionModel',
'TFBlipTextModel',
'TFBlipForImageTextRetrieval',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 28
|
from math import factorial, pi
def __UpperCamelCase ( lowercase__ : float , lowercase__ : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(lowercase__ , (int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(lowercase__ , lowercase__ ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
lowerCAmelCase_ : Optional[int] = float(lowercase__ )
lowerCAmelCase_ : Union[str, Any] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(lowercase__ ) )
def __UpperCamelCase ( lowercase__ : float , lowercase__ : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(lowercase__ , (int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(lowercase__ , lowercase__ ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
lowerCAmelCase_ : int = float(lowercase__ )
lowerCAmelCase_ : Optional[int] = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowercase__ ) )
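# Quick sanity check (illustrative addition): with the default accuracy of 30
# terms, both truncated series should agree with math.sin / math.cos closely.
def _check_against_math(theta: float = 1.0) -> None:
    from math import cos, sin

    assert abs(maclaurin_sin(theta) - sin(theta)) < 1e-10
    assert abs(maclaurin_cos(theta) - cos(theta)) < 1e-10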
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 28
| 1
|
def __UpperCamelCase ( lowercase__ : int = 10**9 ) -> int:
'''simple docstring'''
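# Project Euler 94: sum the perimeters of almost-equilateral triangles
# (sides a, a, a +/- 1) with integral area and perimeter <= max_perimeter.
# Rather than brute force, the loop below walks the Pell-style recurrence that
# generates the valid perimeters 16, 50, 196, 722, ...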
lowerCAmelCase_ : Any = 1
lowerCAmelCase_ : Dict = 2
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Any = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
lowerCAmelCase_ : str = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(f"""{solution() = }""")
| 28
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__UpperCAmelCase = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class __a ( __UpperCamelCase ):
__snake_case : int = """facebook/nllb-200-distilled-600M"""
__snake_case : Optional[int] = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
__snake_case : str = """translator"""
__snake_case : Any = AutoTokenizer
__snake_case : Union[str, Any] = AutoModelForSeqaSeqLM
__snake_case : Optional[int] = LANGUAGE_CODES
__snake_case : int = ["""text""", """text""", """text"""]
__snake_case : str = ["""text"""]
def A ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ):
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
lowerCAmelCase_ : List[Any] = self.lang_to_code[src_lang]
lowerCAmelCase_ : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
UpperCAmelCase , return_tensors="""pt""" , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase )
def A ( self : Optional[Any] , UpperCAmelCase : str ):
return self.model.generate(**UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : int ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCAmelCase )
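# Flow of the tool above (comment only): the first method maps the plain-English
# language names through LANGUAGE_CODES and tokenizes the text, the second runs
# model.generate, and the third decodes the generated ids back into text.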
| 28
| 1
|
from math import ceil
def __UpperCamelCase ( lowercase__ : int = 1001 ) -> int:
'''simple docstring'''
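# Project Euler 28: the i-th ring of the spiral has side length n = 2*i + 1 and
# its top-right corner is n**2; the four corners are n**2, n**2 - 2i, n**2 - 4i
# and n**2 - 6i, which sum to 4 * odd**2 - 6 * even with odd = 2i + 1, even = 2i.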
lowerCAmelCase_ : List[str] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowerCAmelCase_ : Optional[Any] = 2 * i + 1
lowerCAmelCase_ : Union[str, Any] = 2 * i
lowerCAmelCase_ : Optional[Any] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 28
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = """huggingface/label-files"""
lowerCAmelCase_ : int = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : List[str] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : Tuple = {int(lowercase__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[Any] = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowerCAmelCase_ : Tuple = BitConfig(
conv_layer=lowercase__ , num_labels=1000 , idalabel=lowercase__ , labelaid=lowercase__ , )
return config
def __UpperCamelCase ( lowercase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if "stem.conv" in name:
lowerCAmelCase_ : str = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCAmelCase_ : Tuple = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
lowerCAmelCase_ : Dict = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
lowerCAmelCase_ : List[str] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
lowerCAmelCase_ : Any = """bit.encoder.""" + name
return name
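# For example, "stem.conv.weight" becomes "bit.embedder.convolution.weight" and
# "head.fc.bias" becomes "classifier.1.bias" after the substitutions above.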
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : List[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Any=False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = get_config(lowercase__ )
# load original model from timm
lowerCAmelCase_ : str = create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model
lowerCAmelCase_ : Any = timm_model.state_dict()
for key in state_dict.copy().keys():
lowerCAmelCase_ : List[str] = state_dict.pop(lowercase__ )
lowerCAmelCase_ : Dict = val.squeeze() if """head""" in key else val
# load HuggingFace model
lowerCAmelCase_ : Tuple = BitForImageClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# create image processor
lowerCAmelCase_ : Tuple = create_transform(**resolve_data_config({} , model=lowercase__ ) )
lowerCAmelCase_ : Union[str, Any] = transform.transforms
lowerCAmelCase_ : str = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
lowerCAmelCase_ : List[str] = BitImageProcessor(
do_resize=lowercase__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=lowercase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : Tuple = transform(lowercase__ ).unsqueeze(0 )
lowerCAmelCase_ : List[str] = processor(lowercase__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(lowercase__ , lowercase__ )
# verify logits
with torch.no_grad():
lowerCAmelCase_ : Tuple = model(lowercase__ )
lowerCAmelCase_ : List[str] = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
lowerCAmelCase_ : Optional[Any] = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28
| 1
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__UpperCAmelCase = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class __a ( tr.AbstractTransform ):
def __init__( self : Dict , UpperCAmelCase : str = " " ):
lowerCAmelCase_ : Tuple = sentence_delimiter
def A ( self : Any , UpperCAmelCase : str ):
return list(UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Optional[int] = []
for sent_idx, sentence in enumerate(UpperCAmelCase ):
chars.extend(self.process_string(UpperCAmelCase ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCAmelCase ) - 1:
chars.append(self.sentence_delimiter )
return chars
__UpperCAmelCase = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__UpperCAmelCase = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__UpperCAmelCase = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
__UpperCAmelCase = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
__UpperCAmelCase = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
def A ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def A ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=False ):
if concatenate_texts:
return jiwer.compute_measures(
UpperCAmelCase , UpperCAmelCase , truth_transform=UpperCAmelCase , hypothesis_transform=UpperCAmelCase , )["wer"]
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Dict = 0
for prediction, reference in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = jiwer.compute_measures(
UpperCAmelCase , UpperCAmelCase , truth_transform=UpperCAmelCase , hypothesis_transform=UpperCAmelCase , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 28
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __a :
def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple=13 , UpperCAmelCase : Any=64 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : str=32 , UpperCAmelCase : str=5 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=10 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=[1, 16, 4, 4] , UpperCAmelCase : Union[str, Any]=None , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Tuple = patch_size
lowerCAmelCase_ : Union[str, Any] = num_channels
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : List[str] = use_labels
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : int = scope
lowerCAmelCase_ : Tuple = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase_ : int = (self.image_size // 32) ** 2
lowerCAmelCase_ : Dict = num_patches + 1
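# e.g. with the default image_size of 64: (64 // 32) ** 2 = 4 patches, so seq_length = 5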
def A ( self : Any ):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
lowerCAmelCase_ : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCAmelCase , )
def A ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Tuple = ViTHybridModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ):
lowerCAmelCase_ : Tuple = self.type_sequence_label_size
lowerCAmelCase_ : Tuple = ViTHybridForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : int = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = config_and_inputs
lowerCAmelCase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : List[str] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__snake_case : Dict = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__snake_case : int = False
__snake_case : Tuple = False
__snake_case : Tuple = False
def A ( self : int ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A ( self : Dict ):
pass
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A ( self : List[str] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Union[str, Any] = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(config=UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase_ : Tuple = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def A ( self : int ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Union[str, Any] = ViTHybridModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __UpperCamelCase ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def A ( self : int ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Any = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Any = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowerCAmelCase_ : Optional[Any] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : List[str] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" )
lowerCAmelCase_ : Optional[Any] = model(**UpperCAmelCase )
lowerCAmelCase_ : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase_ : List[str] = logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 28
| 1
|
from __future__ import annotations
from collections.abc import Callable
__UpperCAmelCase = list[list[float | int]]
def __UpperCamelCase ( lowercase__ : Matrix , lowercase__ : Matrix ) -> Matrix:
'''simple docstring'''
lowerCAmelCase_ : int = len(lowercase__ )
lowerCAmelCase_ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowercase__ )]
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : float
for row in range(lowercase__ ):
for col in range(lowercase__ ):
lowerCAmelCase_ : Tuple = matrix[row][col]
lowerCAmelCase_ : Union[str, Any] = vector[row][0]
lowerCAmelCase_ : Dict = 0
lowerCAmelCase_ : str = 0
while row < size and col < size:
# pivoting
lowerCAmelCase_ : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowercase__ , lowercase__ ) )[1]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowercase__ ):
lowerCAmelCase_ : str = augmented[rowa][col] / augmented[row][col]
lowerCAmelCase_ : int = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowercase__ ):
for row in range(lowercase__ ):
lowerCAmelCase_ : List[Any] = augmented[row][col] / augmented[col][col]
for cola in range(lowercase__ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowercase__ )
]
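# Tiny worked check (illustrative): for the system x + y = 3, x - y = 1 the call
# solve([[1, 1], [1, -1]], [[3], [1]]) returns [[2.0], [1.0]], i.e. x = 2, y = 1.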
def __UpperCamelCase ( lowercase__ : list[int] ) -> Callable[[int], int]:
'''simple docstring'''
lowerCAmelCase_ : int = len(lowercase__ )
lowerCAmelCase_ : Matrix = [[0 for _ in range(lowercase__ )] for _ in range(lowercase__ )]
lowerCAmelCase_ : Matrix = [[0] for _ in range(lowercase__ )]
lowerCAmelCase_ : Matrix
lowerCAmelCase_ : int
lowerCAmelCase_ : int
lowerCAmelCase_ : int
for x_val, y_val in enumerate(lowercase__ ):
for col in range(lowercase__ ):
lowerCAmelCase_ : Tuple = (x_val + 1) ** (size - col - 1)
lowerCAmelCase_ : Any = y_val
lowerCAmelCase_ : Optional[Any] = solve(lowercase__ , lowercase__ )
def interpolated_func(lowercase__ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowercase__ ) )
return interpolated_func
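# Illustrative check: interpolating the data points [1, 4, 9] fits f(x) = x**2,
# so the returned function satisfies f(1), f(2), f(3) == 1, 4, 9.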
def __UpperCamelCase ( lowercase__ : int ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __UpperCamelCase ( lowercase__ : Callable[[int], int] = question_function , lowercase__ : int = 10 ) -> int:
'''simple docstring'''
lowerCAmelCase_ : list[int] = [func(lowercase__ ) for x_val in range(1 , order + 1 )]
lowerCAmelCase_ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : Callable[[int], int]
lowerCAmelCase_ : int
for poly in polynomials:
lowerCAmelCase_ : int = 1
while func(lowercase__ ) == poly(lowercase__ ):
x_val += 1
ret += poly(lowercase__ )
return ret
if __name__ == "__main__":
print(f"""{solution() = }""")
| 28
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
def __init__( self : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ):
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 28
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : int ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(lowercase__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(lowercase__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(lowercase__ ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
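# Illustrative shapes (comment only): a single image becomes [[image]], a list of
# frames [f1, f2] becomes [[f1, f2]], and a list of videos is returned unchanged.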
class __a ( __UpperCamelCase ):
__snake_case : Union[str, Any] = ["""pixel_values"""]
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 2_55 , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , **UpperCAmelCase : Tuple , ):
super().__init__(**UpperCAmelCase )
lowerCAmelCase_ : Any = size if size is not None else {"""shortest_edge""": 2_24}
lowerCAmelCase_ : Any = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCAmelCase_ : Any = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
lowerCAmelCase_ : Optional[int] = get_size_dict(UpperCAmelCase , param_name="""crop_size""" )
lowerCAmelCase_ : Any = do_resize
lowerCAmelCase_ : Optional[int] = size
lowerCAmelCase_ : int = do_center_crop
lowerCAmelCase_ : int = crop_size
lowerCAmelCase_ : Optional[Any] = resample
lowerCAmelCase_ : List[Any] = do_rescale
lowerCAmelCase_ : int = rescale_factor
lowerCAmelCase_ : Any = do_normalize
lowerCAmelCase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ : Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A ( self : List[str] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ):
lowerCAmelCase_ : str = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" in size:
lowerCAmelCase_ : Tuple = get_resize_output_image_size(UpperCAmelCase , size["""shortest_edge"""] , default_to_square=UpperCAmelCase )
elif "height" in size and "width" in size:
lowerCAmelCase_ : Tuple = (size["""height"""], size["""width"""])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A ( self : List[Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : str , ):
lowerCAmelCase_ : List[str] = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[str] , ):
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Union[float, List[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : int , ):
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase_ : List[str] = to_numpy_array(UpperCAmelCase )
if do_resize:
lowerCAmelCase_ : str = self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase )
if do_center_crop:
lowerCAmelCase_ : str = self.center_crop(UpperCAmelCase , size=UpperCAmelCase )
if do_rescale:
lowerCAmelCase_ : List[Any] = self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase )
if do_normalize:
lowerCAmelCase_ : str = self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase )
lowerCAmelCase_ : Dict = to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase )
return image
def A ( self : List[Any] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[float, List[float]]] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Dict , ):
lowerCAmelCase_ : Optional[Any] = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : str = resample if resample is not None else self.resample
lowerCAmelCase_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ : int = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : Any = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ : List[str] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ : List[str] = image_std if image_std is not None else self.image_std
lowerCAmelCase_ : Union[str, Any] = size if size is not None else self.size
lowerCAmelCase_ : Union[str, Any] = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCAmelCase_ : List[Any] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ : Any = get_size_dict(UpperCAmelCase , param_name="""crop_size""" )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
lowerCAmelCase_ : str = make_batched(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = [
[
self._preprocess_image(
image=UpperCAmelCase , do_resize=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , do_center_crop=UpperCAmelCase , crop_size=UpperCAmelCase , do_rescale=UpperCAmelCase , rescale_factor=UpperCAmelCase , do_normalize=UpperCAmelCase , image_mean=UpperCAmelCase , image_std=UpperCAmelCase , data_format=UpperCAmelCase , )
for img in video
]
for video in videos
]
lowerCAmelCase_ : Optional[int] = {"""pixel_values""": videos}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
| 28
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __a ( __UpperCamelCase ):
__snake_case : Any = ["""image_processor""", """tokenizer"""]
__snake_case : Tuple = """BlipImageProcessor"""
__snake_case : int = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ):
lowerCAmelCase_ : str = False
super().__init__(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.image_processor
def __call__( self : Optional[int] , UpperCAmelCase : ImageInput = None , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
lowerCAmelCase_ : str = self.tokenizer
lowerCAmelCase_ : List[Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
# add pixel_values
lowerCAmelCase_ : Union[str, Any] = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase )
if text is not None:
lowerCAmelCase_ : Optional[Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
else:
lowerCAmelCase_ : int = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
def A ( self : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A ( self : int ):
lowerCAmelCase_ : int = self.tokenizer.model_input_names
lowerCAmelCase_ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
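# Hypothetical usage sketch (the public class name and checkpoint id below are assumptions):
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# inputs = processor(images=image, text="a photography of", return_tensors="pt")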
| 28
| 1
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def __UpperCamelCase ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = torch.nn.Linear(2 , 4 )
lowerCAmelCase_ : str = torch.optim.AdamW(model.parameters() , lr=1.0 )
lowerCAmelCase_ : int = torch.optim.lr_scheduler.OneCycleLR(lowercase__ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
lowerCAmelCase_ : Dict = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
lowerCAmelCase_ : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def __UpperCamelCase ( lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
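# The "signature" above is a scalar fingerprint of the model parameters: loading
# different weights changes it, which the save/load state tests below rely on.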
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(lowercase__ )
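# A quick sketch of why get_signature() works as a checkpoint fingerprint:
# re-initializing the weights moves the scalar, so an unchanged signature after
# a save/load round trip implies the weights were restored. Uses only the
# helpers defined above; added for illustration, not part of the original file.
def signature_demo():
    model, *_ = create_components()
    before = get_signature(model )
    load_random_weights(model )
    after = get_signature(model )
    print(f'signature moved by {abs(before - after):.4f}' )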
class __a ( AccelerateTestCase ):
    @require_cuda
    def A ( self : Union[str, Any] ):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        with self.assertRaises(ValueError ):
            _ = Accelerator(cpu=True )
    def A ( self : int ):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4
        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()
    def A ( self : Optional[int] ):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        self.assertTrue(prepared_model in accelerator._models )
        self.assertTrue(prepared_optimizer in accelerator._optimizers )
        self.assertTrue(prepared_scheduler in accelerator._schedulers )
        self.assertTrue(prepared_train_dl in accelerator._dataloaders )
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
    def A ( self : Optional[Any] ):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models ) == 0 )
        self.assertTrue(len(accelerator._optimizers ) == 0 )
        self.assertTrue(len(accelerator._schedulers ) == 0 )
        self.assertTrue(len(accelerator._dataloaders ) == 0 )
    def A ( self : List[Any] ):
        PartialState._reset_state()
        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args , **kwargs ):
            pass
        with patch("""torch.cuda.set_device""" , noop ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
    def A ( self : str ):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # make sure loaded weights match
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
    def A ( self : Dict ):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model , optimizer , scheduler , train_dl , valid_dl )
        model_signature = get_signature(model )
        # saving hook
        def save_config(models , weights , output_dir ):
            config = {"""class_name""": models[0].__class__.__name__}
            with open(os.path.join(output_dir , """data.json""" ) , """w""" ) as f:
                json.dump(config , f )
        # loading hook
        def load_config(models , input_dir ):
            with open(os.path.join(input_dir , """data.json""" ) , """r""" ) as f:
                config = json.load(f )
            models[0].class_name = config["""class_name"""]
        save_hook = accelerator.register_save_state_pre_hook(save_config )
        load_hook = accelerator.register_load_state_pre_hook(load_config )
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            model.class_name = """random"""
            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
            # model.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__ )
        # remove hooks
        save_hook.remove()
        load_hook.remove()
        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname )
            # make sure random weights don't match with hooks removed
            load_random_weights(model )
            self.assertTrue(abs(model_signature - get_signature(model ) ) > 1e-3 )
            # random class name to verify correct one is loaded
            model.class_name = """random"""
            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname )
            self.assertTrue(abs(model_signature - get_signature(model ) ) < 1e-3 )
            # model.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__ )
    def A ( self : Union[str, Any] ):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertTrue(dummy_obj is None )
    def A ( self : int ):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]
        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model , optimizer , scheduler , train_dl , valid_dl , dummy_obj )
        self.assertEqual(
            getattr(dummy_obj , """_is_accelerate_prepared""" , False ) , False , """Dummy object should have `_is_accelerate_prepared` set to `False`""" , )
        self.assertEqual(
            getattr(model , """_is_accelerate_prepared""" , False ) , True , """Model is missing `_is_accelerate_prepared` or is set to `False`""" , )
        self.assertEqual(
            getattr(optimizer , """_is_accelerate_prepared""" , False ) , True , """Optimizer is missing `_is_accelerate_prepared` or is set to `False`""" , )
        self.assertEqual(
            getattr(scheduler , """_is_accelerate_prepared""" , False ) , True , """Scheduler is missing `_is_accelerate_prepared` or is set to `False`""" , )
        self.assertEqual(
            getattr(train_dl , """_is_accelerate_prepared""" , False ) , True , """Train Dataloader is missing `_is_accelerate_prepared` or is set to `False`""" , )
        self.assertEqual(
            getattr(valid_dl , """_is_accelerate_prepared""" , False ) , True , """Valid Dataloader is missing `_is_accelerate_prepared` or is set to `False`""" , )
    @slow
    @require_bnb
    def A ( self : List[str] ):
        from transformers import AutoModelForCausalLM
        model = AutoModelForCausalLM.from_pretrained(
            """EleutherAI/gpt-neo-125m""" , load_in_8bit=True , device_map={"""""": 0} , )
        accelerator = Accelerator()
        # This should work
        model = accelerator.prepare(model )
    @slow
    @require_bnb
    def A ( self : List[str] ):
        from transformers import AutoModelForCausalLM
        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                """EleutherAI/gpt-neo-125m""" , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map["""lm_head"""] = """cpu"""
        model = AutoModelForCausalLM.from_pretrained(
            """EleutherAI/gpt-neo-125m""" , device_map=device_map , load_in_8bit=True , llm_int8_enable_fp32_cpu_offload=True )
        # This should not work and should raise a ValueError
        with self.assertRaises(ValueError ):
            model = accelerator.prepare(model )
    @slow
    @require_bnb
    @require_multi_gpu
    def A ( self : Dict ):
        from transformers import AutoModelForCausalLM
        PartialState._shared_state = {"""distributed_type""": DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                """EleutherAI/gpt-neo-125m""" , )
        model.tie_weights()
        device_map = infer_auto_device_map(model )
        device_map["""lm_head"""] = 1
        model = AutoModelForCausalLM.from_pretrained(
            """EleutherAI/gpt-neo-125m""" , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should not work and should raise a ValueError
        with self.assertRaises(ValueError ):
            _ = accelerator.prepare(model )
        PartialState._reset_state()
    @slow
    @require_bnb
    @require_multi_gpu
    def A ( self : Tuple ):
        from transformers import AutoModelForCausalLM
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                """EleutherAI/gpt-neo-125m""" , )
        device_map = infer_auto_device_map(model )
        device_map["""lm_head"""] = 1
        model = AutoModelForCausalLM.from_pretrained(
            """EleutherAI/gpt-neo-125m""" , load_in_8bit=True , device_map=device_map , )
        accelerator = Accelerator()
        # This should work
        _ = accelerator.prepare(model )
    @require_cuda
    def A ( self : List[str] ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , lr=0.01 )
        accelerator = Accelerator(cpu=True )
        _ = accelerator.prepare(optimizer )
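# All of the checkpointing tests above exercise one pattern: save_state()
# snapshots model/optimizer/scheduler/dataloader state into a directory and
# load_state() restores it. A minimal sketch of that round trip outside the
# test harness (assumes a CPU-only run; illustrative, not part of the tests):
def checkpoint_round_trip_demo():
    accelerator = Accelerator(cpu=True )
    model, optimizer, scheduler, train_dl, valid_dl = create_components()
    model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(
        model , optimizer , scheduler , train_dl , valid_dl )
    with tempfile.TemporaryDirectory() as ckpt_dir:
        accelerator.save_state(ckpt_dir )                        # write checkpoint files
        load_random_weights(accelerator.unwrap_model(model ) )   # corrupt the weights
        accelerator.load_state(ckpt_dir )                        # weights are restored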
| 28
|
from math import ceil
def solution(n : int = 1001 ) -> int:
    '''Return the sum of the numbers on the diagonals of an n by n Ulam spiral (Project Euler 28).'''
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
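# A cross-check of the closed form used in solution(): the ring with odd side
# length s contributes four corners s**2, s**2 - (s - 1), s**2 - 2*(s - 1) and
# s**2 - 3*(s - 1), which sum to 4*s**2 - 6*(s - 1); with s = 2*i + 1 that is
# exactly the 4*odd**2 - 6*even term above. Verification added for
# illustration, not part of the original solution:
def diagonal_sum_by_rings(n: int ) -> int:
    total = 1
    for s in range(3 , n + 1 , 2 ):
        total += 4 * s**2 - 6 * (s - 1)
    return total

assert diagonal_sum_by_rings(5 ) == 101  # 1+3+5+7+9+13+17+21+25
assert diagonal_sum_by_rings(1001 ) == solution(1001 )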
| 28
| 1
|
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def password_generator(length : int = 8 ) -> str:
    '''Generate a random password drawn from letters, digits and punctuation.'''
    chars = ascii_letters + digits + punctuation
    return "".join(secrets.choice(chars ) for _ in range(length ) )
def alternative_password_generator(chars_incl : str , i : int ) -> str:
    '''Generate a password of length i that is guaranteed to contain chars_incl.'''
    i -= len(chars_incl )
    quotient = i // 3
    remainder = i % 3
    # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
    # random_number(digits, i / 3) + random_characters(punctuation, i / 3)
    chars = (
        chars_incl
        + random(ascii_letters , quotient + remainder )
        + random(digits , quotient )
        + random(punctuation , quotient )
    )
    list_of_chars = list(chars )
    shuffle(list_of_chars )
    return "".join(list_of_chars )
# random is a generalised function for letters, characters and numbers
def random(chars_incl : str , i : int ) -> str:
    '''Return i characters chosen uniformly at random from chars_incl.'''
    return "".join(secrets.choice(chars_incl ) for _ in range(i ) )
def random_number(chars_incl , i ):
    '''Placeholder for a digits-only generator (see the commented formula above).'''
    pass # Put your code here...
def random_letters(chars_incl , i ):
    '''Placeholder for a letters-only generator (see the commented formula above).'''
    pass # Put your code here...
def random_characters(chars_incl , i ):
    '''Placeholder for a punctuation-only generator (see the commented formula above).'''
    pass # Put your code here...
def is_strong_password(password : str , min_length : int = 8 ) -> bool:
    '''Check that a password is at least min_length long and mixes all character classes.'''
    if len(password ) < min_length:
        # Your Password must be at least 8 characters long
        return False
    upper = any(char in ascii_uppercase for char in password )
    lower = any(char in ascii_lowercase for char in password )
    num = any(char in digits for char in password )
    spec_char = any(char in punctuation for char in password )
    return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase
# numbers, and special characters
def main() -> None:
    '''Interactive entry point: ask for a length and required characters, then print passwords.'''
    length = int(input("""Please indicate the max length of your password: """ ).strip() )
    chars_incl = input(
        """Please indicate the characters that must be in your password: """ ).strip()
    print("""Password generated:""" , password_generator(length ) )
    print(
        """Alternative Password generated:""" , alternative_password_generator(chars_incl , length ) , )
    print("""[If you are thinking of using this password, you had better save it.]""" )
if __name__ == "__main__":
main()
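# A small non-interactive usage sketch tying the pieces above together: keep
# drawing candidates until one passes is_strong_password(). Illustrative
# helper, not part of the original module:
def generate_strong_password(length : int = 12 , max_tries : int = 100 ) -> str:
    for _ in range(max_tries ):
        candidate = password_generator(length )
        if is_strong_password(candidate ):
            return candidate
    raise RuntimeError("""could not generate a strong password""" )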
| 28
|
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint , hf_model , config ) -> None:
    '''Copy the original generator weights into the Transformers HiFi-GAN model.'''
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["""input_conv.weight_g"""]
    hf_model.conv_pre.weight_v.data = checkpoint["""input_conv.weight_v"""]
    hf_model.conv_pre.bias.data = checkpoint["""input_conv.bias"""]
    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']
    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
    hf_model.conv_post.weight_g.data = checkpoint["""output_conv.1.weight_g"""]
    hf_model.conv_post.weight_v.data = checkpoint["""output_conv.1.weight_v"""]
    hf_model.conv_post.bias.data = checkpoint["""output_conv.1.bias"""]
    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path , stats_path , pytorch_dump_folder_path , config_path=None , repo_id=None , ):
    '''Convert an original SpeechT5 HiFi-GAN checkpoint to the Transformers format.'''
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config )
    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint["""model"""]["""generator"""] , model , config )
    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print("""Pushing to the hub...""" )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
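# Background on the conversion above: weight normalization factors each conv
# weight into a magnitude tensor (weight_g) and a direction tensor (weight_v).
# apply_weight_norm() exposes those attributes so the original g/v tensors can
# be copied in, and remove_weight_norm() fuses them back into a plain .weight.
# A toy illustration with stock PyTorch (independent of SpeechT5):
def weight_norm_demo():
    conv = torch.nn.utils.weight_norm(torch.nn.Conv1d(1 , 1 , 3 ) )
    conv.weight_g.data.fill_(2.0 )            # overwrite the magnitude, as load_weights does
    torch.nn.utils.remove_weight_norm(conv )  # fuse weight_g * weight_v / ||weight_v|| into .weight
    return conv.weight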
| 28
| 1
|
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester :
    def __init__( self : Any , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self : List[str] ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self : str ):
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
    def A ( self : List[Any] ):
        config = self.get_config()
        config.vocab_size = 3_00
        return config
    def A ( self : Tuple ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A ( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __a ( ModelTesterMixin , unittest.TestCase ):
__snake_case : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : List[Any] = False
__snake_case : List[Any] = False
__snake_case : Optional[Any] = False
__snake_case : Union[str, Any] = False
__snake_case : Optional[Any] = ()
def A ( self : Tuple ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
def A ( self : int ):
self.config_tester.run_common_tests()
    def A ( self : Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def A ( self : Optional[int] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def A ( self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def A ( self : List[str] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def A ( self : Dict ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def A ( self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def A ( self : Tuple ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def A ( self : List[str] ):
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason="""MRA does not output attentions""" )
def A ( self : Any ):
return
@require_torch
class __a ( unittest.TestCase ):
@slow
    def A ( self : Any ):
        model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(2_56 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 2_56, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def A ( self : Optional[Any] ):
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
        input_ids = torch.arange(2_56 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 5_02_65
        expected_shape = torch.Size((1, 2_56, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
    @slow
    def A ( self : Optional[int] ):
        model = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
        input_ids = torch.arange(40_96 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 5_02_65
        expected_shape = torch.Size((1, 40_96, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
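# A standalone usage sketch mirroring the integration tests above (downloads
# the public checkpoint; added for illustration, not part of the test suite):
def mra_inference_demo():
    model = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
    input_ids = torch.arange(2_56 ).unsqueeze(0 )   # toy sequence of length 256
    with torch.no_grad():
        hidden_states = model(input_ids )[0]
    print(hidden_states.shape )   # torch.Size([1, 256, 768])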
| 28
|
def all_chars_unique(input_str : str ) -> bool:
    '''Return True if no character occurs twice in input_str, using an integer as a bit set.'''
    # Note: the function name is a descriptive reconstruction; only the local
    # variable names below are attested by the original code.
    bitmap = 0  # bit i is set once the character with code point i has been seen
    for ch in input_str:
        ch_unicode = ord(ch )
        ch_bit_index_on = pow(2 , ch_unicode )
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
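# The function above uses an unbounded Python int as a growable bit set:
# bit ord(ch) records whether ch was already seen. The same check with an
# explicit set, for comparison (illustrative, not part of the original file):
def all_chars_unique_with_set(input_str : str ) -> bool:
    seen = set()
    for ch in input_str:
        if ch in seen:
            return False
        seen.add(ch )
    return True

assert all_chars_unique_with_set("""abcdef""" ) is True
assert all_chars_unique_with_set("""aA""" ) is True   # the check is case-sensitive
assert all_chars_unique_with_set("""abca""" ) is False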
| 28
| 1
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow :
    def __init__( self : Optional[Any] , device : str = "cpu" , clip_model : str = "openai/clip-vit-large-patch14" ):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model )
        self.image_mean = [0.4814_5466, 0.457_8275, 0.4082_1073]
        self.image_std = [0.2686_2954, 0.2613_0258, 0.2757_7711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        self.resize = torchvision.transforms.Resize(2_24 )
        self.center_crop = torchvision.transforms.CenterCrop(2_24 )
    def preprocess_img( self : Dict , images ):
        images = self.resize(images )
        images = self.center_crop(images )
        images = self.normalize(images )
        return images
    def __call__( self : Any , text=None , images=None , **kwargs ):
        encoding = self.tokenizer(text=text , **kwargs )
        encoding["""pixel_values"""] = self.preprocess_img(images )
        encoding = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class __a ( nn.Module ):
    def __init__( self : Optional[Any] , iterations=10 , lr=0.01 , vqgan=None , vqgan_config=None , vqgan_checkpoint=None , clip=None , clip_preprocessor=None , device=None , log=False , save_vector=True , return_val="image" , quantize=True , save_intermediate=False , show_intermediate=False , make_grid=False , ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device , conf_path=vqgan_config , ckpt_path=vqgan_checkpoint )
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.clip.to(self.device )
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device )
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def A ( self : Optional[int] , input_path=None , output_path=None , total_duration=5 , extend_frames=True ):
        images = []
        if output_path is None:
            output_path = """./animation.gif"""
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + """/*""" ) )
        if not len(paths ):
            raise ValueError(
                """No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
                """ function?)""" )
        if len(paths ) == 1:
            print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
        frame_duration = total_duration / len(paths )
        durations = [frame_duration] * len(paths )
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(""".png""" ):
                images.append(imageio.imread(file_name ) )
        imageio.mimsave(output_path , images , duration=durations )
        print(F'gif saved to {output_path}' )
    def _get_latent( self : Optional[Any] , path=None , img=None ):
        if not (path or img):
            raise ValueError("""Input either path or tensor""" )
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path ) , target_image_size=2_56 ).to(self.device )
        img = preprocess_vqgan(img )
        z , *_ = self.vqgan.encode(img )
        return z
    def _add_vector( self : Tuple , transform_vector ):
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q , *_ = self.vqgan.quantize(trans_latent )
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q )
    def _get_clip_similarity( self : int , prompts , image , weights=None ):
        clip_inputs = self.clip_preprocessor(text=prompts , images=image , return_tensors="""pt""" , padding=True )
        clip_outputs = self.clip(**clip_inputs )
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss( self : Dict , pos_prompts , neg_prompts , image ):
        pos_logits = self._get_clip_similarity(pos_prompts["""prompts"""] , image , weights=(1 / pos_prompts["""weights"""]) )
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["""prompts"""] , image , weights=neg_prompts["""weights"""] )
        else:
            neg_logits = torch.tensor([1] , device=self.device )
        loss = -torch.log(pos_logits ) + torch.log(neg_logits )
        return loss
    def _optimize_CLIP( self : Optional[Any] , original_img , pos_prompts , neg_prompts ):
        vector = torch.randn_like(self.latent , requires_grad=True , device=self.device )
        optim = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            transformed_img = self._add_vector(vector )
            processed_img = loop_post_process(transformed_img )
            clip_loss = self._get_CLIP_loss(pos_prompts , neg_prompts , processed_img )
            print("""CLIP loss""" , clip_loss )
            if self.log:
                wandb.log({"""CLIP Loss""": clip_loss} )
            clip_loss.backward(retain_graph=True )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
    def _init_logging( self : Tuple , positive_prompts , negative_prompts , image_path ):
        wandb.init(reinit=True , project="""face-editor""" )
        wandb.config.update({"""Positive Prompts""": positive_prompts} )
        wandb.config.update({"""Negative Prompts""": negative_prompts} )
        wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
        if image_path:
            image = Image.open(image_path )
            image = image.resize((2_56, 2_56) )
            wandb.log("""Original Image""" , wandb.Image(image ) )
    def process_prompts( self : Tuple , prompts ):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts , str ):
            prompts = [prompt.strip() for prompt in prompts.split("""|""" )]
        for prompt in prompts:
            if isinstance(prompt , (tuple, list) ):
                processed_prompt = prompt[0]
                weight = float(prompt[1] )
            elif ":" in prompt:
                processed_prompt , weight = prompt.split(""":""" )
                weight = float(weight )
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt )
            weights.append(weight )
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights , device=self.device ),
        }
    def A ( self : Optional[Any] , pos_prompts , neg_prompts=None , image_path=None , show_intermediate=True , save_intermediate=False , show_final=True , save_final=True , save_path=None , ):
        if image_path:
            self.latent = self._get_latent(image_path )
        else:
            self.latent = torch.randn(self.latent_dim , device=self.device )
        if self.log:
            self._init_logging(pos_prompts , neg_prompts , image_path )
        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts )
        neg_prompts = self.process_prompts(neg_prompts )
        if save_final and save_path is None:
            save_path = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
            if not os.path.exists(save_path ):
                os.makedirs(save_path )
            else:
                save_path = save_path + """_""" + get_timestamp()
                os.makedirs(save_path )
        self.save_path = save_path
        original_img = self.vqgan.decode(self.latent )[0]
        if show_intermediate:
            print("""Original Image""" )
            show_pil(custom_to_pil(original_img ) )
        original_img = loop_post_process(original_img )
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img , pos_prompts , neg_prompts ) ):
            if show_intermediate:
                show_pil(transformed_img )
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}.png' ) )
            if self.log:
                wandb.log({"""Image""": wandb.Image(transformed_img )} )
            if show_final:
                show_pil(transformed_img )
            if save_final:
                transformed_img.save(os.path.join(self.save_path , F'iter_{iter:03d}_final.png' ) )
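# Stripped of logging and I/O, the generator above is plain CLIP-guided latent
# optimization: VQGAN and CLIP stay frozen while gradient descent runs on a
# perturbation of the latent so the decoded image scores better against the
# prompts. Skeleton of that loop with generic callables (a sketch, not the
# original class):
def latent_optimization_sketch(decode , score , latent , steps=10 , lr=0.01 ):
    # decode: latent -> image tensor; score: image -> differentiable scalar loss
    vector = torch.randn_like(latent , requires_grad=True )
    optim = torch.optim.Adam([vector] , lr=lr )
    for _ in range(steps ):
        optim.zero_grad()
        loss = score(decode(latent + vector ) )
        loss.backward()
        optim.step()
    return latent + vector.detach()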
| 28
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __a ( PreTrainedTokenizerFast ):
__snake_case : List[Any] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
__snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : str = ElectraTokenizer
    def __init__( self : List[Any] , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def A ( self : Optional[int] , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def A ( self : List[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def A ( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
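# The two sequence-building overrides above produce the standard BERT-style
# pair layout [CLS] A [SEP] B [SEP], with token_type_ids 0 over segment A and
# 1 over segment B. A tiny pure-Python replica of that layout (the cls/sep ids
# here are hypothetical placeholders, not Electra's actual ids):
def demo_pair_layout(ids_a , ids_b , cls_id=101 , sep_id=102 ):
    input_ids = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    token_type_ids = [0] * (len(ids_a ) + 2 ) + [1] * (len(ids_b ) + 1 )
    return input_ids, token_type_ids

assert demo_pair_layout([7, 8] , [9] ) == ([101, 7, 8, 102, 9, 102], [0, 0, 0, 0, 1, 1])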
| 28
| 1
|
from __future__ import annotations
def depth_first_search( possible_board : list[int] , diagonal_right_collisions : list[int] , diagonal_left_collisions : list[int] , boards : list[list[str]] , n : int , ) -> None:
    '''Recursively place one queen per row, pruning vertical and diagonal collisions.'''
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(lowercase__ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution(n : int ) -> None:
    '''Solve the n-queens puzzle and print every solution board.'''
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print("""""" )
    print(len(boards ) , """solutions were found.""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
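# Why the row - col / row + col bookkeeping works: two queens share a "\"
# diagonal iff row - col is equal and a "/" diagonal iff row + col is equal,
# so each diagonal is identified by a single integer. Sanity check against the
# known solution counts (2 boards for n=4, 92 for n=8); added for
# illustration, not part of the original file:
def count_n_queens_solutions(n: int ) -> int:
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n )
    return len(boards )

assert count_n_queens_solutions(4 ) == 2
assert count_n_queens_solutions(8 ) == 92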
| 28
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main() -> None:
    '''Close or warn about inactive issues on the huggingface/transformers repository.'''
    g = Github(os.environ["""GITHUB_TOKEN"""] )
    repo = g.get_repo("""huggingface/transformers""" )
    open_issues = repo.get_issues(state="""open""" )
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 28
| 1
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class __a ( unittest.TestCase ):
    def analyze_directory( self : Optional[int] , directory : Path , identifier : Union[str, None] = None , n_identifier : Union[List[str], None] = None , ignore_files : Union[str, List[str], None] = None , only_modules : bool = True , ):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(F'{module_identifier} is not a module.' )
            else:
                result = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def A ( self : Any ):
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )
    def A ( self : List[Any] ):
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )
    def A ( self : Tuple ):
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )
    def A ( self : List[Any] ):
        directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )
    def A ( self : Tuple ):
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
| 28
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __a ( unittest.TestCase ):
def A ( self : List[Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCAmelCase_ : Optional[Any] = Vector()
def A ( self : List[str] ):
lowerCAmelCase_ : Tuple = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCAmelCase ) , """(0,0,0,0,0,1)""" )
def A ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCAmelCase ) , 4 )
def A ( self : Dict ):
lowerCAmelCase_ : Dict = Vector([1, 2] )
lowerCAmelCase_ : str = Vector([1, 2, 3, 4, 5] )
lowerCAmelCase_ : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Dict = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
lowerCAmelCase_ : Optional[int] = Vector([2, -1, 4] ) # for test of dot product
lowerCAmelCase_ : str = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def A ( self : List[str] ):
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def A ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCAmelCase , UpperCAmelCase ) ) , """(3,4,7)""" )
def A ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = Vector([1, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : int = x.copy()
self.assertEqual(str(UpperCAmelCase ) , str(UpperCAmelCase ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCAmelCase ) , """(0,1,0)""" )
def A ( self : Any ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : List[str] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Tuple ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Union[str, Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCAmelCase_ : Any = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def A ( self : Tuple ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def A ( self : Dict ):
lowerCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def A ( self : Optional[int] ):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
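# A hedged, self-contained sketch of the `.lib` API exercised above (the real
# Vector/axpy implementations are not shown here; component storage and the
# axpy signature are inferred from the assertions, not verified):
class MiniVector:
    def __init__(self, components):
        self.components = list(components)  # e.g. MiniVector([1, 2, 3])

    def component(self, i):
        return self.components[i]

    def __mul__(self, scalar):
        return MiniVector([scalar * c for c in self.components])

    def __add__(self, other):
        return MiniVector([a + b for a, b in zip(self.components, other.components)])

def mini_axpy(scalar, x, y):
    # axpy computes scalar * x + y, matching the "(3,4,7)" expectation above
    return x * scalar + y

print(mini_axpy(2, MiniVector([1, 2, 3]), MiniVector([1, 0, 1])).components)  # [3, 4, 7]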
| 28
| 1
|
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class __a ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Optional[int] , UpperCAmelCase : float , UpperCAmelCase : Callable , UpperCAmelCase : int , UpperCAmelCase : float = 1.0 , UpperCAmelCase : str = None , ):
super().__init__()
lowerCAmelCase_ : Optional[int] = initial_learning_rate
lowerCAmelCase_ : Tuple = warmup_steps
lowerCAmelCase_ : Dict = power
lowerCAmelCase_ : List[Any] = decay_schedule_fn
lowerCAmelCase_ : Any = name
def __call__( self : Dict , UpperCAmelCase : Union[str, Any] ):
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
lowerCAmelCase_ : Any = tf.cast(UpperCAmelCase , tf.floataa )
lowerCAmelCase_ : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa )
lowerCAmelCase_ : Tuple = global_step_float / warmup_steps_float
lowerCAmelCase_ : Tuple = self.initial_learning_rate * tf.math.pow(UpperCAmelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=UpperCAmelCase , )
def A ( self : Any ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def __UpperCamelCase ( lowercase__ : float , lowercase__ : int , lowercase__ : int , lowercase__ : float = 0.0 , lowercase__ : float = 0.9 , lowercase__ : float = 0.999 , lowercase__ : float = 1E-8 , lowercase__ : Optional[float] = None , lowercase__ : Optional[float] = None , lowercase__ : float = 0.0 , lowercase__ : float = 1.0 , lowercase__ : Optional[List[str]] = None , ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=lowercase__ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=lowercase__ , )
if num_warmup_steps:
lowerCAmelCase_ : Optional[int] = WarmUp(
initial_learning_rate=lowercase__ , decay_schedule_fn=lowercase__ , warmup_steps=lowercase__ , )
if weight_decay_rate > 0.0:
lowerCAmelCase_ : Union[str, Any] = AdamWeightDecay(
            learning_rate=lowercase__ , weight_decay_rate=lowercase__ , beta_1=lowercase__ , beta_2=lowercase__ , epsilon=lowercase__ , clipnorm=lowercase__ , global_clipnorm=lowercase__ , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=lowercase__ , )
else:
lowerCAmelCase_ : Optional[Any] = tf.keras.optimizers.Adam(
            learning_rate=lowercase__ , beta_1=lowercase__ , beta_2=lowercase__ , epsilon=lowercase__ , clipnorm=lowercase__ , global_clipnorm=lowercase__ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class __a ( __UpperCamelCase ):
def __init__( self : Dict , UpperCAmelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.999 , UpperCAmelCase : float = 1e-7 , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : Optional[List[str]] = None , UpperCAmelCase : str = "AdamWeightDecay" , **UpperCAmelCase : List[str] , ):
super().__init__(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = weight_decay_rate
lowerCAmelCase_ : Tuple = include_in_weight_decay
lowerCAmelCase_ : Optional[Any] = exclude_from_weight_decay
@classmethod
def A ( cls : Optional[int] , UpperCAmelCase : Any ):
lowerCAmelCase_ : int = {"""WarmUp""": WarmUp}
return super(UpperCAmelCase , cls ).from_config(UpperCAmelCase , custom_objects=UpperCAmelCase )
def A ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] ):
super(UpperCAmelCase , self )._prepare_local(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase_ : Optional[int] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def A ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : List[Any]=None , **UpperCAmelCase : Tuple ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = list(zip(*UpperCAmelCase ) )
return super(UpperCAmelCase , self ).apply_gradients(zip(UpperCAmelCase , UpperCAmelCase ) , name=UpperCAmelCase , **UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
lowerCAmelCase_ : Dict = apply_state or {}
lowerCAmelCase_ : Optional[int] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
lowerCAmelCase_ : Any = self._fallback_apply_state(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[Any] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def A ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : str=None ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_dense(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any=None ):
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = self._decay_weights_op(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
with tf.control_dependencies([decay] ):
return super(UpperCAmelCase , self )._resource_apply_sparse(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ : Tuple = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def A ( self : int , UpperCAmelCase : Union[str, Any] ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(UpperCAmelCase , UpperCAmelCase ) is not None:
return False
return True
class __a ( __UpperCamelCase ):
def __init__( self : List[Any] ):
lowerCAmelCase_ : str = []
lowerCAmelCase_ : List[Any] = None
@property
def A ( self : Any ):
if self._accum_steps is None:
lowerCAmelCase_ : Dict = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def A ( self : Tuple ):
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : List[Any] , UpperCAmelCase : List[Any] ):
if not self._gradients:
lowerCAmelCase_ : str = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(UpperCAmelCase ) , trainable=UpperCAmelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(UpperCAmelCase ) != len(self._gradients ):
raise ValueError(F'Expected {len(self._gradients )} gradients, but got {len(UpperCAmelCase )}' )
for accum_gradient, gradient in zip(self._gradients , UpperCAmelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(UpperCAmelCase )
self._accum_steps.assign_add(1 )
def A ( self : Dict ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(UpperCAmelCase ) )
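# Self-contained sketch of the warmup schedule implemented by the class above
# (pure Python, no TensorFlow; the linear post-warmup decay standing in for
# decay_schedule_fn is an assumption):
def warmup_then_decay(step, init_lr=1e-3, warmup_steps=100, total_steps=1000, power=1.0):
    if step < warmup_steps:
        # polynomial warmup: lr ramps from 0 to init_lr over warmup_steps
        return init_lr * (step / warmup_steps) ** power
    # after warmup the real code delegates to decay_schedule_fn (e.g. PolynomialDecay)
    remaining = max(total_steps - step, 0) / max(total_steps - warmup_steps, 1)
    return init_lr * remaining

print([round(warmup_then_decay(s), 6) for s in (0, 50, 100, 550, 1000)])
# [0.0, 0.0005, 0.001, 0.0005, 0.0]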
| 28
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __a ( __UpperCamelCase ,__UpperCamelCase ):
__snake_case : Union[str, Any] = """pixel_values"""
__snake_case : Optional[Any] = False
__snake_case : Dict = TimmBackboneConfig
def __init__( self : List[str] , UpperCAmelCase : int , **UpperCAmelCase : List[str] ):
requires_backends(self , """timm""" )
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(UpperCAmelCase , """out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
        lowerCAmelCase_ : List[str] = getattr(UpperCAmelCase , """use_pretrained_backbone""" , None )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
        lowerCAmelCase_ : str = config.out_indices if getattr(UpperCAmelCase , """out_indices""" , None ) is not None else (-1,)
lowerCAmelCase_ : Optional[int] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCAmelCase_ : Union[str, Any] = self._backbone.return_layers
        lowerCAmelCase_ : Dict = {layer["""module"""]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def A ( cls : Dict , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""config""" , TimmBackboneConfig() )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""use_timm_backbone""" , UpperCAmelCase )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels )
lowerCAmelCase_ : Tuple = kwargs.pop("""features_only""" , config.features_only )
lowerCAmelCase_ : List[str] = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""out_indices""" , config.out_indices )
lowerCAmelCase_ : Optional[Any] = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
pass
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : Any ):
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase_ : Optional[Any] = self._all_layers
lowerCAmelCase_ : List[Any] = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = self._return_layers
lowerCAmelCase_ : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase_ : Tuple = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[str] = tuple(UpperCAmelCase )
lowerCAmelCase_ : int = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase_ : Optional[Any] = (feature_maps,)
if output_hidden_states:
lowerCAmelCase_ : Tuple = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
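# Hypothetical usage sketch for the backbone above (assumes torch, timm and a
# transformers version that ships TimmBackbone; the checkpoint name is an example):
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
model = TimmBackbone(config)
outputs = model(torch.randn(1, 3, 224, 224))
print([fm.shape for fm in outputs.feature_maps])  # one map per entry in out_indices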
| 28
| 1
|
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(__UpperCamelCase )
class __a ( __UpperCamelCase ):
def __init__( self : Tuple , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
requires_backends(self , """decord""" )
self.check_model_type(UpperCAmelCase )
def A ( self : Tuple , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[int]=None ):
lowerCAmelCase_ : List[str] = {}
if frame_sampling_rate is not None:
lowerCAmelCase_ : str = frame_sampling_rate
if num_frames is not None:
lowerCAmelCase_ : Tuple = num_frames
lowerCAmelCase_ : Dict = {}
if top_k is not None:
lowerCAmelCase_ : int = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Dict , UpperCAmelCase : Union[str, List[str]] , **UpperCAmelCase : Dict ):
return super().__call__(UpperCAmelCase , **UpperCAmelCase )
def A ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Tuple=1 ):
if num_frames is None:
lowerCAmelCase_ : Optional[int] = self.model.config.num_frames
if video.startswith("""http://""" ) or video.startswith("""https://""" ):
lowerCAmelCase_ : Union[str, Any] = BytesIO(requests.get(UpperCAmelCase ).content )
lowerCAmelCase_ : Any = VideoReader(UpperCAmelCase )
videoreader.seek(0 )
lowerCAmelCase_ : List[Any] = 0
lowerCAmelCase_ : Union[str, Any] = num_frames * frame_sampling_rate - 1
lowerCAmelCase_ : Optional[int] = np.linspace(UpperCAmelCase , UpperCAmelCase , num=UpperCAmelCase , dtype=np.intaa )
lowerCAmelCase_ : Optional[Any] = videoreader.get_batch(UpperCAmelCase ).asnumpy()
lowerCAmelCase_ : List[str] = list(UpperCAmelCase )
lowerCAmelCase_ : Any = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
return model_inputs
def A ( self : Dict , UpperCAmelCase : Optional[Any] ):
lowerCAmelCase_ : Tuple = self.model(**UpperCAmelCase )
return model_outputs
def A ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : int=5 ):
if top_k > self.model.config.num_labels:
lowerCAmelCase_ : Optional[int] = self.model.config.num_labels
if self.framework == "pt":
lowerCAmelCase_ : Any = model_outputs.logits.softmax(-1 )[0]
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = probs.topk(UpperCAmelCase )
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
lowerCAmelCase_ : List[Any] = scores.tolist()
lowerCAmelCase_ : List[Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCAmelCase , UpperCAmelCase )]
| 28
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class __a ( __UpperCamelCase ):
__snake_case : Optional[Any] = """mra"""
def __init__( self : List[str] , UpperCAmelCase : Tuple=5_02_65 , UpperCAmelCase : str=7_68 , UpperCAmelCase : int=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Tuple=30_72 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[str]=5_12 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : int=1e-5 , UpperCAmelCase : Optional[int]="absolute" , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Any="full" , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Any=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , **UpperCAmelCase : Tuple , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[int] = position_embedding_type
lowerCAmelCase_ : Any = block_per_row
lowerCAmelCase_ : int = approx_mode
lowerCAmelCase_ : Union[str, Any] = initial_prior_first_n_blocks
lowerCAmelCase_ : Dict = initial_prior_diagonal_n_blocks
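# Minimal usage sketch (assumes a transformers version that ships MRA; the
# keyword names follow the __init__ above):
from transformers import MraConfig

config = MraConfig(approx_mode="full", block_per_row=4)
print(config.hidden_size, config.num_attention_heads)  # 768 12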
| 28
| 1
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
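# A plausible implementation of the availability check used above (the real
# helper lives in the package's own imports module; this sketch is an assumption):
import importlib.util

def _is_rich_available() -> bool:
    return importlib.util.find_spec("rich") is not None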
| 28
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi( precision : int ) -> str:
    '''Return the first `precision` digits of pi via the Chudnovsky algorithm.'''
    if not isinstance(precision , int ):
        raise TypeError("""Undefined for non-integers""" )
    elif precision < 1:
        raise ValueError("""Undefined for non-natural numbers""" )
    # each Chudnovsky term contributes roughly 14 digits, hence ceil(precision / 14)
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__UpperCAmelCase = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 28
| 1
|
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
def __init__( self : str , *UpperCAmelCase : str , **UpperCAmelCase : int ):
warnings.warn(
"""The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ChineseCLIPImageProcessor instead.""" , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
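# The warning above points at the drop-in replacement; a hypothetical migration
# (the checkpoint name is an example, not taken from this file):
from transformers import ChineseCLIPImageProcessor

processor = ChineseCLIPImageProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")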
| 28
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __a ( __UpperCamelCase ):
__snake_case : Union[str, Any] = """gptj"""
__snake_case : int = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , UpperCAmelCase : Optional[int]=5_04_00 , UpperCAmelCase : Optional[int]=20_48 , UpperCAmelCase : str=40_96 , UpperCAmelCase : Any=28 , UpperCAmelCase : Dict=16 , UpperCAmelCase : List[str]=64 , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Optional[Any]=1e-5 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Dict=5_02_56 , UpperCAmelCase : int=5_02_56 , UpperCAmelCase : Tuple=False , **UpperCAmelCase : Any , ):
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Union[str, Any] = n_positions
lowerCAmelCase_ : Union[str, Any] = n_embd
lowerCAmelCase_ : List[Any] = n_layer
lowerCAmelCase_ : List[Any] = n_head
lowerCAmelCase_ : Tuple = n_inner
lowerCAmelCase_ : Optional[Any] = rotary_dim
lowerCAmelCase_ : str = activation_function
lowerCAmelCase_ : str = resid_pdrop
lowerCAmelCase_ : List[Any] = embd_pdrop
lowerCAmelCase_ : Dict = attn_pdrop
lowerCAmelCase_ : Any = layer_norm_epsilon
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Optional[int] = use_cache
lowerCAmelCase_ : Optional[int] = bos_token_id
lowerCAmelCase_ : Any = eos_token_id
super().__init__(
bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase )
class __a ( __UpperCamelCase ):
def __init__( self : Any , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : str = "default" , UpperCAmelCase : List[PatchingSpec] = None , UpperCAmelCase : bool = False , ):
super().__init__(UpperCAmelCase , task=UpperCAmelCase , patching_specs=UpperCAmelCase , use_past=UpperCAmelCase )
        if not getattr(self._config , """pad_token_id""" , None ):
# TODO: how to do that better?
lowerCAmelCase_ : List[Any] = 0
@property
def A ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction="""inputs""" )
lowerCAmelCase_ : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCAmelCase_ : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def A ( self : Union[str, Any] ):
return self._config.n_layer
@property
def A ( self : Optional[Any] ):
return self._config.n_head
def A ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase_ : Optional[Any] = super(UpperCAmelCase , self ).generate_dummy_inputs(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
        # We need to order the inputs in the way they appear in the forward()
lowerCAmelCase_ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : Optional[Any] = seqlen + 2
lowerCAmelCase_ : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase_ : Optional[int] = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(self.num_layers )
]
lowerCAmelCase_ : Dict = common_inputs["""attention_mask"""]
if self.use_past:
lowerCAmelCase_ : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
lowerCAmelCase_ : str = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def A ( self : Optional[int] ):
return 13
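# Shape bookkeeping for the dummy past_key_values generated above: each of the
# n_layer (key, value) pairs has shape (batch, n_head, seqlen + 2, head_dim)
# with head_dim = n_embd // n_head. With the GPT-J defaults and batch=2, seqlen=8:
batch, seqlen, n_head, n_embd = 2, 8, 16, 4096
print((batch, n_head, seqlen + 2, n_embd // n_head))  # (2, 16, 10, 256)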
| 28
| 1
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
__UpperCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def __UpperCamelCase ( lowercase__ : int , lowercase__ : tuple , lowercase__ : Path , lowercase__ : Optional[Any] , lowercase__ : int , lowercase__ : str , lowercase__ : str , lowercase__ : Optional[int]=False , ) -> Optional[int]:
'''simple docstring'''
output_path.parent.mkdir(parents=lowercase__ , exist_ok=lowercase__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , use_external_data_format=lowercase__ , enable_onnx_checker=lowercase__ , opset_version=lowercase__ , )
else:
export(
lowercase__ , lowercase__ , f=output_path.as_posix() , input_names=lowercase__ , output_names=lowercase__ , dynamic_axes=lowercase__ , do_constant_folding=lowercase__ , opset_version=lowercase__ , )
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : str , lowercase__ : str , lowercase__ : int , lowercase__ : bool = False ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCAmelCase_ : Union[str, Any] = """cuda"""
elif fpaa and not torch.cuda.is_available():
raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
else:
lowerCAmelCase_ : Any = """cpu"""
lowerCAmelCase_ : Any = StableDiffusionPipeline.from_pretrained(lowercase__ , torch_dtype=lowercase__ ).to(lowercase__ )
lowerCAmelCase_ : int = Path(lowercase__ )
# TEXT ENCODER
lowerCAmelCase_ : int = pipeline.text_encoder.config.max_position_embeddings
lowerCAmelCase_ : int = pipeline.text_encoder.config.hidden_size
lowerCAmelCase_ : List[Any] = pipeline.tokenizer(
"""A sample prompt""" , padding="""max_length""" , max_length=pipeline.tokenizer.model_max_length , truncation=lowercase__ , return_tensors="""pt""" , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowercase__ , dtype=torch.intaa )) , output_path=output_path / """text_encoder""" / """model.onnx""" , ordered_input_names=["""input_ids"""] , output_names=["""last_hidden_state""", """pooler_output"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """sequence"""},
} , opset=lowercase__ , )
del pipeline.text_encoder
# UNET
lowerCAmelCase_ : Optional[Any] = pipeline.unet.config.in_channels
lowerCAmelCase_ : Union[str, Any] = pipeline.unet.config.sample_size
lowerCAmelCase_ : List[Any] = output_path / """unet""" / """model.onnx"""
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ),
torch.randn(2 ).to(device=lowercase__ , dtype=lowercase__ ),
torch.randn(2 , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ),
False,
) , output_path=lowercase__ , ordered_input_names=["""sample""", """timestep""", """encoder_hidden_states""", """return_dict"""] , output_names=["""out_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""timestep""": {0: """batch"""},
"""encoder_hidden_states""": {0: """batch""", 1: """sequence"""},
} , opset=lowercase__ , use_external_data_format=lowercase__ , )
lowerCAmelCase_ : Union[str, Any] = str(unet_path.absolute().as_posix() )
lowerCAmelCase_ : int = os.path.dirname(lowercase__ )
lowerCAmelCase_ : Tuple = onnx.load(lowercase__ )
# clean up existing tensor files
shutil.rmtree(lowercase__ )
os.mkdir(lowercase__ )
# collate external tensor files into one
onnx.save_model(
lowercase__ , lowercase__ , save_as_external_data=lowercase__ , all_tensors_to_one_file=lowercase__ , location="""weights.pb""" , convert_attribute=lowercase__ , )
del pipeline.unet
# VAE ENCODER
lowerCAmelCase_ : int = pipeline.vae
lowerCAmelCase_ : Tuple = vae_encoder.config.in_channels
lowerCAmelCase_ : List[str] = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
    lowerCAmelCase_ : List[str] = lambda sample , return_dict : vae_encoder.encode(sample , return_dict )[0].sample()
onnx_export(
lowercase__ , model_args=(
torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ),
False,
) , output_path=output_path / """vae_encoder""" / """model.onnx""" , ordered_input_names=["""sample""", """return_dict"""] , output_names=["""latent_sample"""] , dynamic_axes={
"""sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=lowercase__ , )
# VAE DECODER
lowerCAmelCase_ : int = pipeline.vae
lowerCAmelCase_ : int = vae_decoder.config.latent_channels
lowerCAmelCase_ : Dict = vae_decoder.config.out_channels
# forward only through the decoder part
lowerCAmelCase_ : Union[str, Any] = vae_encoder.decode
onnx_export(
lowercase__ , model_args=(
torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ),
False,
) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
"""latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
} , opset=lowercase__ , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
lowerCAmelCase_ : Union[str, Any] = pipeline.safety_checker
lowerCAmelCase_ : Tuple = safety_checker.config.vision_config.num_channels
lowerCAmelCase_ : Union[str, Any] = safety_checker.config.vision_config.image_size
lowerCAmelCase_ : List[Any] = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , lowercase__ , lowercase__ , lowercase__ , ).to(device=lowercase__ , dtype=lowercase__ ),
torch.randn(1 , lowercase__ , lowercase__ , lowercase__ ).to(device=lowercase__ , dtype=lowercase__ ),
) , output_path=output_path / """safety_checker""" / """model.onnx""" , ordered_input_names=["""clip_input""", """images"""] , output_names=["""out_images""", """has_nsfw_concepts"""] , dynamic_axes={
"""clip_input""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
"""images""": {0: """batch""", 1: """height""", 2: """width""", 3: """channels"""},
} , opset=lowercase__ , )
del pipeline.safety_checker
lowerCAmelCase_ : Dict = OnnxRuntimeModel.from_pretrained(output_path / """safety_checker""" )
lowerCAmelCase_ : Optional[int] = pipeline.feature_extractor
else:
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : Tuple = None
lowerCAmelCase_ : str = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_encoder""" ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / """vae_decoder""" ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / """text_encoder""" ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / """unet""" ) , scheduler=pipeline.scheduler , safety_checker=lowercase__ , feature_extractor=lowercase__ , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(lowercase__ )
print("""ONNX pipeline saved to""" , lowercase__ )
del pipeline
del onnx_pipeline
lowerCAmelCase_ : int = OnnxStableDiffusionPipeline.from_pretrained(lowercase__ , provider="""CPUExecutionProvider""" )
print("""ONNX pipeline is loadable""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__UpperCAmelCase = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
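# The torch-version gate at the top of this script in miniature: ONNX export
# dropped `use_external_data_format`/`enable_onnx_checker` in torch 1.11, so the
# call signature is picked at runtime.
from packaging import version

def needs_legacy_onnx_args(torch_version: str) -> bool:
    return version.parse(version.parse(torch_version).base_version) < version.parse("1.11")

print(needs_legacy_onnx_args("1.10.2"), needs_legacy_onnx_args("2.1.0"))  # True False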
| 28
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
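# Rough sketch of the lazy-import mechanism used above (the real implementation
# is transformers.utils._LazyModule; this simplified version is an assumption):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # import the owning submodule only when the attribute is first touched
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")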
| 28
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : Dict , lowercase__ : Optional[Any]=False ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : str = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase_ : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : List[str]=False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase_ : Tuple = """"""
else:
lowerCAmelCase_ : Tuple = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : Optional[int] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase_ : List[str] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase_ : Optional[Any] = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase_ : Any = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
        state_dict.pop(k , None )
def __UpperCamelCase ( lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ : int = dct.pop(lowercase__ )
lowerCAmelCase_ : Dict = val
def __UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    lowerCAmelCase_ : Optional[int] = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : List[Any] , lowercase__ : int ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : List[str] = ViTConfig()
lowerCAmelCase_ : List[Any] = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : List[Any] = int(vit_name[-12:-10] )
lowerCAmelCase_ : Dict = int(vit_name[-9:-6] )
else:
lowerCAmelCase_ : Tuple = 1000
lowerCAmelCase_ : Dict = """huggingface/label-files"""
lowerCAmelCase_ : Any = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : Optional[Any] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
        lowerCAmelCase_ : int = {int(k ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[Any] = idalabel
lowerCAmelCase_ : int = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[Any] = int(vit_name[-6:-4] )
lowerCAmelCase_ : str = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
lowerCAmelCase_ : List[str] = 192
lowerCAmelCase_ : Optional[int] = 768
lowerCAmelCase_ : Union[str, Any] = 12
lowerCAmelCase_ : Dict = 3
elif vit_name[9:].startswith("""small""" ):
lowerCAmelCase_ : Optional[int] = 384
lowerCAmelCase_ : Any = 1536
lowerCAmelCase_ : Optional[Any] = 12
lowerCAmelCase_ : Tuple = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
lowerCAmelCase_ : List[Any] = 768
lowerCAmelCase_ : Tuple = 2304
lowerCAmelCase_ : int = 8
lowerCAmelCase_ : Optional[int] = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
lowerCAmelCase_ : Tuple = 1024
lowerCAmelCase_ : Optional[int] = 4096
lowerCAmelCase_ : Any = 24
lowerCAmelCase_ : Dict = 16
elif vit_name[4:].startswith("""huge""" ):
lowerCAmelCase_ : str = 1280
lowerCAmelCase_ : Optional[Any] = 5120
lowerCAmelCase_ : Tuple = 32
lowerCAmelCase_ : Dict = 16
# load original model from timm
lowerCAmelCase_ : int = timm.create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase_ : Dict = timm_model.state_dict()
if base_model:
remove_classification_head_(lowercase__ )
lowerCAmelCase_ : Optional[Any] = create_rename_keys(lowercase__ , lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
if vit_name[-5:] == "in21k":
lowerCAmelCase_ : List[str] = ViTModel(lowercase__ ).eval()
else:
lowerCAmelCase_ : Any = ViTForImageClassification(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
lowerCAmelCase_ : List[str] = DeiTImageProcessor(size=config.image_size )
else:
lowerCAmelCase_ : List[str] = ViTImageProcessor(size=config.image_size )
lowerCAmelCase_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase_ : Optional[Any] = encoding["""pixel_values"""]
lowerCAmelCase_ : Dict = model(lowercase__ )
if base_model:
lowerCAmelCase_ : str = timm_model.forward_features(lowercase__ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(lowercase__ , outputs.pooler_output , atol=1E-3 )
else:
lowerCAmelCase_ : Optional[Any] = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
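# The read_in_q_k_v step above in miniature: timm stores attention projections
# as one fused (3 * hidden, hidden) matrix, which is sliced into q/k/v rows.
import torch

hidden = 4
qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q, k, v = qkv[:hidden], qkv[hidden:2 * hidden], qkv[-hidden:]
assert torch.equal(torch.cat([q, k, v]), qkv)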
| 28
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __a :
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[Any]=14 , UpperCAmelCase : str=7 , UpperCAmelCase : str=True , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Any=True , UpperCAmelCase : Any=99 , UpperCAmelCase : Any=32 , UpperCAmelCase : Any=4 , UpperCAmelCase : int=4 , UpperCAmelCase : str=4 , UpperCAmelCase : Tuple=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Optional[Any]=5_12 , UpperCAmelCase : List[str]=0.02 , ):
lowerCAmelCase_ : List[Any] = parent
lowerCAmelCase_ : Union[str, Any] = batch_size
lowerCAmelCase_ : Dict = seq_length
lowerCAmelCase_ : Optional[Any] = is_training
lowerCAmelCase_ : Optional[int] = use_input_mask
lowerCAmelCase_ : Optional[Any] = use_token_type_ids
lowerCAmelCase_ : Optional[Any] = use_labels
lowerCAmelCase_ : Any = vocab_size
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : Any = rotary_dim
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : int = None
lowerCAmelCase_ : Union[str, Any] = vocab_size - 1
lowerCAmelCase_ : str = vocab_size - 1
lowerCAmelCase_ : Optional[int] = vocab_size - 1
def A ( self : List[Any] ):
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Optional[int] = None
if self.use_input_mask:
lowerCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = config_and_inputs
lowerCAmelCase_ : int = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : str = 20
lowerCAmelCase_ : Dict = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowerCAmelCase_ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowerCAmelCase_ : Tuple = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase_ : Dict = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : List[str] = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Any = model(UpperCAmelCase )
lowerCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def A ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any ):
lowerCAmelCase_ : int = 20
lowerCAmelCase_ : List[Any] = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : Tuple = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowerCAmelCase_ : Dict = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase_ : Tuple = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowerCAmelCase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__snake_case : Any = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def A ( self : Any ):
lowerCAmelCase_ : List[str] = FlaxGPTJModelTester(self )
def A ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Tuple ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@tooslow
def A ( self : int ):
lowerCAmelCase_ : Optional[int] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
lowerCAmelCase_ : Tuple = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase , truncation=UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Optional[Any] = model.config.eos_token_id
lowerCAmelCase_ : List[Any] = jax.jit(model.generate )
lowerCAmelCase_ : Any = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCAmelCase_ : str = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@is_pt_flax_cross_test
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase_ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase_ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase_ : Dict = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = pt_inputs["""input_ids"""].shape
lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
                    pt_inputs["""attention_mask"""][batch_idx, :start_index] = 0
                    pt_inputs["""attention_mask"""][batch_idx, start_index:] = 1
                    prepared_inputs_dict["""attention_mask"""][batch_idx, :start_index] = 0
                    prepared_inputs_dict["""attention_mask"""][batch_idx, start_index:] = 1
lowerCAmelCase_ : Tuple = pt_model_class(UpperCAmelCase ).eval()
lowerCAmelCase_ : List[str] = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowerCAmelCase_ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase )
lowerCAmelCase_ : List[str] = fx_state
with torch.no_grad():
lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCAmelCase_ : int = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model_class.from_pretrained(UpperCAmelCase , from_pt=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = fx_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase_ : str = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase_ : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase_ : Any = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = pt_model_class(UpperCAmelCase ).eval()
lowerCAmelCase_ : Any = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowerCAmelCase_ : Union[str, Any] = load_flax_weights_in_pytorch_model(UpperCAmelCase , fx_model.params )
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = pt_inputs["""input_ids"""].shape
lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
                    pt_inputs["""attention_mask"""][batch_idx, :start_index] = 0
                    pt_inputs["""attention_mask"""][batch_idx, start_index:] = 1
                    prepared_inputs_dict["""attention_mask"""][batch_idx, :start_index] = 0
                    prepared_inputs_dict["""attention_mask"""][batch_idx, start_index:] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCAmelCase_ : Tuple = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = pt_model_class.from_pretrained(UpperCAmelCase , from_flax=UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ : Dict = pt_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
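# Usage note (an assumption about the standard transformers test setup, not part of
# the original file): `@is_pt_flax_cross_test` gates the two equivalence tests above
# behind the RUN_PT_FLAX_CROSS_TESTS environment flag, so a typical invocation is e.g.
#
#   RUN_PT_FLAX_CROSS_TESTS=1 python -m pytest -k "equivalence" tests/models/gptj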
| 28
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], __UpperCAmelCase, module_spec=__spec__)
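# A minimal sketch (an assumption, not part of the original __init__ module) of what
# the lazy structure above buys: the sentencepiece-backed tokenizer is only imported
# on first attribute access, so merely importing the package stays cheap.
#
#     from transformers.models.bartpho import BartphoTokenizer  # triggers the lazy import
#     tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")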
| 28
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """
    Output class for the scheduler's step function output.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # no input scaling is needed; kept for interchangeability with other schedulers
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        # explicit Langevin-like "churn": raise the noise level from sigma to sigma_hat
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True):
        # Heun-style second-order correction using the derivative at sigma_prev
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
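# A minimal sampling sketch (an assumption, not part of this file) showing how the
# churn step and the Heun-style correction above chain together; `unet` stands in
# for any callable mapping (sample, sigma) to a model output, mirroring the
# KarrasVe pipeline loop.
#
#     scheduler = KarrasVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=50)
#     sample = randn_tensor(shape, generator=generator) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#         model_output = unet(sample_hat, sigma_hat)
#         step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#         if sigma_prev != 0:
#             model_output = unet(step_output.prev_sample, sigma_prev)
#             step_output = scheduler.step_correct(
#                 model_output, sigma_hat, sigma_prev, sample_hat,
#                 step_output.prev_sample, step_output.derivative,
#             )
#         sample = step_output.prev_sample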
| 28
| 1
|
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = 'ResNetConfig'
# Base docstring
_CHECKPOINT_FOR_DOC = 'microsoft/resnet-50'
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = 'microsoft/resnet-50'
_IMAGE_CLASS_EXPECTED_OUTPUT = 'tiger cat'
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    'microsoft/resnet-50',
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet embeddings (stem), composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size and, if needed, to downsample the
    input with `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet residual layer, composed of two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet bottleneck layer: the `3x3` convolution is wrapped in `1x1` convolutions that first reduce the
    channels by `reduction` and then restore them.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed of stacked layers.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            # infer the problem type from the config / label dtype the first time around
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
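# A short usage sketch (an assumption, not part of the original file; `image` is any
# PIL image and the checkpoint name comes from the docstring constants above):
#
#     from transformers import AutoImageProcessor
#     import torch
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     predicted_label = model.config.id2label[logits.argmax(-1).item()]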
| 28
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            current_node.prev = previous_node
            previous_node.next = current_node
            previous_node = current_node
        # close the circle: the last node links back to the front
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
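# A minimal round-trip demo (an assumption, not part of the original module),
# exercising enqueue/first/dequeue on the circular queue defined above.
if __name__ == "__main__":
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # a
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b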
| 28
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
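# A minimal usage sketch (an assumption, not part of the original tool definition);
# `audio` is a 1-D float array sampled at 16 kHz, and PipelineTool lazily downloads
# the checkpoint on first call:
#
#     tool = SpeechToTextTool()
#     transcript = tool(audio)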
| 28
|
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_1 = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the parameters from a T5X-Flax checkpoint to the Transformers-PyTorch naming scheme."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    # Make a state dict with torch tensors.
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
    args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
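# Example invocation (paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output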
| 28
| 1
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
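# A minimal usage sketch (an assumption; the checkpoint name is illustrative and
# `image` is a PIL image of a document page):
#
#     from transformers import LayoutXLMProcessor
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     # with apply_ocr=True (the image processor default) words/boxes come from OCR
#     encoding = processor(image, return_tensors="pt")
#     print(encoding.keys())  # input_ids, bbox, attention_mask, image, ...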
| 28
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Lists the (old, new) key pairs used to rename the DINO state dict to the HF ViT layout."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Splits the fused qkv projection of each encoder layer into separate q/k/v weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drops the classification head weights in place."""
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    """Downloads the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copies/renames a DINO checkpoint's weights into our ViT structure and verifies the outputs."""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
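# Example invocation (output path is a placeholder):
#
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16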
| 28
| 1
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f'backbone {config.backbone} is not supported by timm.')
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        # empty init weights function, kept for compatibility; timm loads the weights itself
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
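# A minimal usage sketch (an assumption; "resnet18" must be available in the local
# timm installation):
#
#     import torch
#     from transformers import TimmBackbone
#     backbone = TimmBackbone.from_pretrained("resnet18", use_pretrained_backbone=False)
#     outputs = backbone(torch.randn(1, 3, 224, 224))
#     print([f.shape for f in outputs.feature_maps])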
| 28
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximates sin(theta) with a truncated Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # reduce theta into [-2*pi, 2*pi] so the truncated series stays accurate
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximates cos(theta) with a truncated Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
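# Hedged sanity check (an assumption, not part of the original module): for moderate
# `accuracy`, the truncated series should agree with math.sin / math.cos after the
# range reduction above.
def _check_against_math(theta: float = 1.0, accuracy: int = 30) -> None:
    from math import cos, sin

    assert abs(maclaurin_sin(theta, accuracy) - sin(theta)) < 1e-9
    assert abs(maclaurin_cos(theta, accuracy) - cos(theta)) < 1e-9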
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 28
| 1
|
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # normalize both embedding matrices row-wise, then take the pairwise dot products
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim))
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(self, config: CLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, **kwargs):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 28
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
__UpperCAmelCase = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class __a ( __UpperCamelCase ):
__snake_case : int = """facebook/nllb-200-distilled-600M"""
__snake_case : Optional[int] = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
__snake_case : str = """translator"""
__snake_case : Any = AutoTokenizer
__snake_case : Union[str, Any] = AutoModelForSeqaSeqLM
__snake_case : Optional[int] = LANGUAGE_CODES
__snake_case : int = ["""text""", """text""", """text"""]
__snake_case : str = ["""text"""]
def A ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ):
if src_lang not in self.lang_to_code:
raise ValueError(F'{src_lang} is not a supported language.' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'{tgt_lang} is not a supported language.' )
lowerCAmelCase_ : List[Any] = self.lang_to_code[src_lang]
lowerCAmelCase_ : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
UpperCAmelCase , return_tensors="""pt""" , src_lang=UpperCAmelCase , tgt_lang=UpperCAmelCase )
def A ( self : Optional[Any] , UpperCAmelCase : str ):
return self.model.generate(**UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : int ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCAmelCase )
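# A minimal sketch of the name-to-code resolution the tool performs before
# building its tokenizer inputs: a plain-English language name is validated and
# mapped to its NLLB-200 code. The two-entry table and helper name below are
# illustrative stand-ins for the full mapping defined above.
_SKETCH_LANG_CODES = {"English": "eng_Latn", "Romanian": "ron_Latn"}

def _sketch_resolve_lang(name: str) -> str:
    if name not in _SKETCH_LANG_CODES:
        raise ValueError(f"{name} is not a supported language.")
    return _SKETCH_LANG_CODES[name]

assert _sketch_resolve_lang("Romanian") == "ron_Latn"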
| 28
| 1
|
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {}
__UpperCAmelCase = {}
__UpperCAmelCase = {}
def __UpperCamelCase ( lowercase__ : type , lowercase__ : Optional[str] , lowercase__ : Optional[List[str]] = None , ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' )
lowerCAmelCase_ : str = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' )
lowerCAmelCase_ : List[Any] = format_type
def __UpperCamelCase ( lowercase__ : Exception , lowercase__ : Optional[str] , lowercase__ : Optional[List[str]] = None ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Any = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
lowerCAmelCase_ : Dict = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['python'])
_register_formatter(ArrowFormatter, 'arrow', aliases=['pa', 'pyarrow'])
_register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
_register_formatter(PandasFormatter, 'pandas', aliases=['pd'])
_register_formatter(CustomFormatter, 'custom')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, 'torch', aliases=['pt', 'pytorch'])
else:
__UpperCAmelCase = ValueError('PyTorch needs to be installed to be able to return PyTorch tensors.')
_register_unavailable_formatter(_torch_error, 'torch', aliases=['pt', 'pytorch'])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, 'tensorflow', aliases=['tf'])
else:
__UpperCAmelCase = ValueError('Tensorflow needs to be installed to be able to return Tensorflow tensors.')
_register_unavailable_formatter(_tf_error, 'tensorflow', aliases=['tf'])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, 'jax', aliases=[])
else:
__UpperCAmelCase = ValueError('JAX needs to be installed to be able to return JAX arrays.')
_register_unavailable_formatter(_jax_error, 'jax', aliases=[])
def __UpperCamelCase ( lowercase__ : Optional[str] ) -> Optional[str]:
'''simple docstring'''
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def __UpperCamelCase ( lowercase__ : Optional[str] , **lowercase__ : List[Any] ) -> Formatter:
'''simple docstring'''
lowerCAmelCase_ : int = get_format_type_from_alias(lowercase__ )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**lowercase__ )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
            f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type is not None )}, but got \'{format_type}\'' )
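# A self-contained sketch of the registry-with-aliases pattern implemented above:
# formatters are stored under a canonical name, every alias resolves to that
# name, and an unavailable backend stores an exception to raise at lookup time.
# All identifiers below are illustrative, not the module's real tables.
_SKETCH_TYPES: dict = {}
_SKETCH_ALIASES: dict = {}
_SKETCH_UNAVAILABLE: dict = {}

def _sketch_register(formatter_cls, name, aliases=()):
    _SKETCH_TYPES[name] = formatter_cls
    for alias in set(tuple(aliases) + (name,)):
        _SKETCH_ALIASES[alias] = name

def _sketch_get(name, **kwargs):
    name = _SKETCH_ALIASES.get(name, name)  # resolve alias to canonical name
    if name in _SKETCH_TYPES:
        return _SKETCH_TYPES[name](**kwargs)
    if name in _SKETCH_UNAVAILABLE:
        raise _SKETCH_UNAVAILABLE[name]
    raise ValueError(f"unknown format type {name!r}")

_sketch_register(dict, "mapping", aliases=["map"])
assert isinstance(_sketch_get("map"), dict)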
| 28
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = """huggingface/label-files"""
lowerCAmelCase_ : int = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : List[str] = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
    lowerCAmelCase_ : Tuple = {int(k ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Optional[Any] = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowerCAmelCase_ : Tuple = BitConfig(
conv_layer=lowercase__ , num_labels=1000 , idalabel=lowercase__ , labelaid=lowercase__ , )
return config
def __UpperCamelCase ( lowercase__ : List[Any] ) -> Optional[int]:
'''simple docstring'''
if "stem.conv" in name:
lowerCAmelCase_ : str = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCAmelCase_ : Tuple = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
lowerCAmelCase_ : Dict = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
lowerCAmelCase_ : List[str] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
lowerCAmelCase_ : Any = """bit.encoder.""" + name
return name
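# Illustrative examples of the renaming above:
#   "stem.conv.weight"      -> "bit.embedder.convolution.weight"
#   "blocks.0.conv1.weight" -> "bit.encoder.layers.0.conv1.weight"
#   "head.fc.weight"        -> "classifier.1.weight"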
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : List[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Any=False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = get_config(lowercase__ )
# load original model from timm
lowerCAmelCase_ : str = create_model(lowercase__ , pretrained=lowercase__ )
timm_model.eval()
# load state_dict of original model
lowerCAmelCase_ : Any = timm_model.state_dict()
for key in state_dict.copy().keys():
lowerCAmelCase_ : List[str] = state_dict.pop(lowercase__ )
lowerCAmelCase_ : Dict = val.squeeze() if """head""" in key else val
# load HuggingFace model
lowerCAmelCase_ : Tuple = BitForImageClassification(lowercase__ )
model.eval()
model.load_state_dict(lowercase__ )
# create image processor
lowerCAmelCase_ : Tuple = create_transform(**resolve_data_config({} , model=lowercase__ ) )
lowerCAmelCase_ : Union[str, Any] = transform.transforms
lowerCAmelCase_ : str = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
lowerCAmelCase_ : List[str] = BitImageProcessor(
do_resize=lowercase__ , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowercase__ , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=lowercase__ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCAmelCase_ : int = prepare_img()
lowerCAmelCase_ : Tuple = transform(lowercase__ ).unsqueeze(0 )
lowerCAmelCase_ : List[str] = processor(lowercase__ , return_tensors="""pt""" ).pixel_values
# verify pixel values
    assert torch.allclose(timm_pixel_values , pixel_values )
# verify logits
with torch.no_grad():
lowerCAmelCase_ : Tuple = model(lowercase__ )
lowerCAmelCase_ : List[str] = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
lowerCAmelCase_ : Optional[Any] = timm_model(lowercase__ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
processor.save_pretrained(lowercase__ )
if push_to_hub:
print(f'Pushing model {model_name} and processor to the hub' )
model.push_to_hub(f'ybelkada/{model_name}' )
processor.push_to_hub(f'ybelkada/{model_name}' )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
__UpperCAmelCase = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 28
| 1
|
import math
import tensorflow as tf
from packaging import version
def __UpperCamelCase ( lowercase__ : Dict ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = tf.convert_to_tensor(lowercase__ )
lowerCAmelCase_ : Dict = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
return x * cdf
def __UpperCamelCase ( lowercase__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase_ : Dict = tf.convert_to_tensor(lowercase__ )
lowerCAmelCase_ : str = tf.cast(math.pi , x.dtype )
lowerCAmelCase_ : List[Any] = tf.cast(0.044715 , x.dtype )
lowerCAmelCase_ : Union[str, Any] = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowercase__ , 3 )) ))
return x * cdf
def __UpperCamelCase ( lowercase__ : Dict ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = tf.convert_to_tensor(lowercase__ )
return x * tf.tanh(tf.math.softplus(lowercase__ ) )
def __UpperCamelCase ( lowercase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = tf.convert_to_tensor(lowercase__ )
lowerCAmelCase_ : Union[str, Any] = tf.cast(0.044715 , x.dtype )
lowerCAmelCase_ : List[Any] = tf.cast(0.7978845608 , x.dtype )
return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) ))
def __UpperCamelCase ( lowercase__ : Tuple ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = tf.convert_to_tensor(lowercase__ )
lowerCAmelCase_ : List[str] = tf.cast(1.702 , x.dtype )
return x * tf.math.sigmoid(coeff * x )
def __UpperCamelCase ( lowercase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
return tf.clip_by_value(_gelu(lowercase__ ) , -10 , 10 )
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any]=-1 ) -> str:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : Any = tf.split(lowercase__ , 2 , axis=lowercase__ )
    return a * tf.math.sigmoid(b )
if version.parse(tf.version.VERSION) >= version.parse('2.4'):
def __UpperCamelCase ( lowercase__ : Dict ) -> List[Any]:
'''simple docstring'''
return tf.keras.activations.gelu(lowercase__ , approximate=lowercase__ )
__UpperCAmelCase = tf.keras.activations.gelu
__UpperCAmelCase = approximate_gelu_wrap
else:
__UpperCAmelCase = _gelu
__UpperCAmelCase = _gelu_new
__UpperCAmelCase = {
'gelu': gelu,
'gelu_10': gelu_aa,
'gelu_fast': gelu_fast,
'gelu_new': gelu_new,
'glu': glu,
'mish': mish,
'quick_gelu': quick_gelu,
'relu': tf.keras.activations.relu,
'sigmoid': tf.keras.activations.sigmoid,
'silu': tf.keras.activations.swish,
'swish': tf.keras.activations.swish,
'tanh': tf.keras.activations.tanh,
}
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
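# A quick numerical sanity check (illustrative, standard library only) that the
# tanh-based approximation above tracks the exact erf-based GELU; the sampling
# grid and the 1e-2 tolerance are arbitrary choices, not values from this module.
import math

def _sketch_gelu_exact(x: float) -> float:
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def _sketch_gelu_tanh(x: float) -> float:
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x**3)))

assert max(
    abs(_sketch_gelu_exact(i / 100) - _sketch_gelu_tanh(i / 100)) for i in range(-400, 401)
) < 1e-2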
| 28
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __a :
def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple=13 , UpperCAmelCase : Any=64 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : str=32 , UpperCAmelCase : str=5 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=10 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=[1, 16, 4, 4] , UpperCAmelCase : Union[str, Any]=None , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Tuple = patch_size
lowerCAmelCase_ : Union[str, Any] = num_channels
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : List[str] = use_labels
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : int = scope
lowerCAmelCase_ : Tuple = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase_ : int = (self.image_size // 32) ** 2
lowerCAmelCase_ : Dict = num_patches + 1
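        # e.g. with the default image_size=64: (64 // 32) ** 2 = 4 patches, so seq_length = 4 + 1 = 5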
def A ( self : Any ):
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[int] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def A ( self : Optional[Any] ):
lowerCAmelCase_ : List[Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCAmelCase , )
def A ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Tuple = ViTHybridModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : List[str] = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any ):
lowerCAmelCase_ : Tuple = self.type_sequence_label_size
lowerCAmelCase_ : Tuple = ViTHybridForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : int = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Tuple = config_and_inputs
lowerCAmelCase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : List[str] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__snake_case : Dict = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__snake_case : int = False
__snake_case : Tuple = False
__snake_case : Tuple = False
def A ( self : int ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A ( self : Dict ):
pass
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A ( self : List[str] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Union[str, Any] = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(config=UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase_ : Tuple = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def A ( self : int ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Union[str, Any] = ViTHybridModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __UpperCamelCase ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def A ( self : int ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A ( self : Tuple ):
lowerCAmelCase_ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Any = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Any = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowerCAmelCase_ : Optional[Any] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : List[str] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" )
lowerCAmelCase_ : Optional[Any] = model(**UpperCAmelCase )
lowerCAmelCase_ : List[str] = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase_ : List[str] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 28
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 28
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
def __init__( self : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ):
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 28
| 1
|
from __future__ import annotations
import math
class __a :
def __init__( self : str , UpperCAmelCase : int ):
lowerCAmelCase_ : Union[str, Any] = size
# approximate the overall size of segment tree with given value
lowerCAmelCase_ : List[str] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowerCAmelCase_ : Optional[int] = [0 for i in range(0 , 4 * size )]
lowerCAmelCase_ : List[Any] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def A ( self : Optional[Any] , UpperCAmelCase : int ):
return idx * 2
def A ( self : int , UpperCAmelCase : int ):
return idx * 2 + 1
def A ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : list[int] ):
if left_element == right_element:
lowerCAmelCase_ : int = a[left_element - 1]
else:
lowerCAmelCase_ : Any = (left_element + right_element) // 2
self.build(self.left(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.build(self.right(UpperCAmelCase ) , mid + 1 , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : int = max(
self.segment_tree[self.left(UpperCAmelCase )] , self.segment_tree[self.right(UpperCAmelCase )] )
def A ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
if self.flag[idx] is True:
lowerCAmelCase_ : Optional[int] = self.lazy[idx]
lowerCAmelCase_ : Tuple = False
if left_element != right_element:
lowerCAmelCase_ : List[str] = self.lazy[idx]
lowerCAmelCase_ : int = self.lazy[idx]
lowerCAmelCase_ : int = True
lowerCAmelCase_ : str = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowerCAmelCase_ : List[Any] = val
if left_element != right_element:
lowerCAmelCase_ : List[Any] = val
lowerCAmelCase_ : Optional[int] = val
lowerCAmelCase_ : List[str] = True
lowerCAmelCase_ : Optional[int] = True
return True
lowerCAmelCase_ : Optional[Any] = (left_element + right_element) // 2
self.update(self.left(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
self.update(self.right(UpperCAmelCase ) , mid + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = max(
self.segment_tree[self.left(UpperCAmelCase )] , self.segment_tree[self.right(UpperCAmelCase )] )
return True
def A ( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
if self.flag[idx] is True:
lowerCAmelCase_ : Dict = self.lazy[idx]
lowerCAmelCase_ : Optional[Any] = False
if left_element != right_element:
lowerCAmelCase_ : List[str] = self.lazy[idx]
lowerCAmelCase_ : List[str] = self.lazy[idx]
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : int = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowerCAmelCase_ : Dict = (left_element + right_element) // 2
lowerCAmelCase_ : str = self.query(self.left(UpperCAmelCase ) , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = self.query(self.right(UpperCAmelCase ) , mid + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
return max(UpperCAmelCase , UpperCAmelCase )
def __str__( self : int ):
        return str([self.query(1 , 1 , self.size , i , i ) for i in range(1 , self.size + 1 )] )
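# Note on the lazy fields above (illustrative summary): lazy[idx] holds a pending
# range assignment and flag[idx] marks that one exists; both update() and query()
# push any pending value down to the two children before recursing, so each
# operation touches O(log n) nodes instead of rewriting every element in range.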
if __name__ == "__main__":
__UpperCAmelCase = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
__UpperCAmelCase = 15
__UpperCAmelCase = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 1_11)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 2_35)
print(segt)
| 28
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __a ( __UpperCamelCase ):
__snake_case : Any = ["""image_processor""", """tokenizer"""]
__snake_case : Tuple = """BlipImageProcessor"""
__snake_case : int = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ):
lowerCAmelCase_ : str = False
super().__init__(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.image_processor
def __call__( self : Optional[int] , UpperCAmelCase : ImageInput = None , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
lowerCAmelCase_ : str = self.tokenizer
lowerCAmelCase_ : List[Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
# add pixel_values
lowerCAmelCase_ : Union[str, Any] = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase )
if text is not None:
lowerCAmelCase_ : Optional[Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
else:
lowerCAmelCase_ : int = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
def A ( self : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A ( self : int ):
lowerCAmelCase_ : int = self.tokenizer.model_input_names
lowerCAmelCase_ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 28
| 1
|
import math
__UpperCAmelCase = 10
__UpperCAmelCase = 7
__UpperCAmelCase = BALLS_PER_COLOUR * NUM_COLOURS
def __UpperCamelCase ( lowercase__ : int = 20 ) -> str:
'''simple docstring'''
lowerCAmelCase_ : str = math.comb(lowercase__ , lowercase__ )
lowerCAmelCase_ : List[Any] = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowercase__ )
lowerCAmelCase_ : Optional[Any] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
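# A brute-force cross-check of the closed form above on a tiny instance
# (illustrative): by linearity of expectation the answer is
# NUM_COLOURS * P(a fixed colour is seen), where
# P(seen) = 1 - comb(NUM_BALLS - BALLS_PER_COLOUR, n) / comb(NUM_BALLS, n).
from itertools import combinations

def _sketch_expected_colours(colours=3, per_colour=2, picks=2):
    balls = [c for c in range(colours) for _ in range(per_colour)]
    draws = list(combinations(range(len(balls)), picks))
    return sum(len({balls[i] for i in draw}) for draw in draws) / len(draws)

assert abs(_sketch_expected_colours() - 3 * (1 - math.comb(4, 2) / math.comb(6, 2))) < 1e-12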
if __name__ == "__main__":
print(solution(20))
| 28
|
from math import ceil
def __UpperCamelCase ( lowercase__ : int = 1001 ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[str] = 1
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowerCAmelCase_ : Optional[Any] = 2 * i + 1
lowerCAmelCase_ : Union[str, Any] = 2 * i
lowerCAmelCase_ : Optional[Any] = total + 4 * odd**2 - 6 * even
return total
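# Derivation note (illustrative): ring i of the spiral has odd side length
# odd = 2*i + 1 and step even = 2*i; its four corner values are odd**2,
# odd**2 - even, odd**2 - 2*even and odd**2 - 3*even, which sum to the
# 4 * odd**2 - 6 * even added per iteration above. Check for a 5x5 spiral:
assert 1 + (4 * 3**2 - 6 * 2) + (4 * 5**2 - 6 * 4) == 101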
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 28
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__UpperCAmelCase = False
@skip_mps
class __a ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Tuple = StableDiffusionAttendAndExcitePipeline
__snake_case : Any = False
__snake_case : Optional[int] = TEXT_TO_IMAGE_PARAMS
__snake_case : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS.union({"""token_indices"""} )
__snake_case : str = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def A ( cls : str ):
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def A ( cls : Tuple ):
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def A ( self : List[Any] ):
torch.manual_seed(0 )
lowerCAmelCase_ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , )
lowerCAmelCase_ : Optional[int] = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=UpperCAmelCase , set_alpha_to_one=UpperCAmelCase , )
torch.manual_seed(0 )
lowerCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowerCAmelCase_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
lowerCAmelCase_ : int = CLIPTextModel(UpperCAmelCase )
lowerCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCAmelCase_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def A ( self : Tuple , UpperCAmelCase : Any , UpperCAmelCase : int=0 ):
if str(UpperCAmelCase ).startswith("""mps""" ):
lowerCAmelCase_ : Dict = torch.manual_seed(UpperCAmelCase )
else:
lowerCAmelCase_ : Any = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowerCAmelCase_ : Dict = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def A ( self : List[str] ):
lowerCAmelCase_ : int = """cpu"""
lowerCAmelCase_ : int = self.get_dummy_components()
lowerCAmelCase_ : int = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowerCAmelCase_ : Dict = self.get_dummy_inputs(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = pipe(**UpperCAmelCase ).images
lowerCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
lowerCAmelCase_ : Optional[Any] = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] )
lowerCAmelCase_ : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1e-3 )
def A ( self : List[Any] ):
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def A ( self : Tuple ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def A ( self : Optional[int] ):
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def A ( self : str ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def A ( self : Union[str, Any] ):
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def A ( self : List[str] ):
super().test_save_load_local(expected_max_difference=5e-4 )
def A ( self : List[str] ):
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class __a ( unittest.TestCase ):
@classmethod
def A ( cls : List[str] ):
super().setUpClass()
        torch.use_deterministic_algorithms(True )
@classmethod
def A ( cls : List[str] ):
super().tearDownClass()
        torch.use_deterministic_algorithms(False )
def A ( self : List[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Dict = torch.manual_seed(51 )
lowerCAmelCase_ : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
lowerCAmelCase_ : Optional[Any] = """a painting of an elephant with glasses"""
lowerCAmelCase_ : int = [5, 7]
lowerCAmelCase_ : Dict = pipe(
prompt=UpperCAmelCase , token_indices=UpperCAmelCase , guidance_scale=7.5 , generator=UpperCAmelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
lowerCAmelCase_ : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 28
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> List[str]:
'''simple docstring'''
hf_model.apply_weight_norm()
lowerCAmelCase_ : Dict = checkpoint["""input_conv.weight_g"""]
lowerCAmelCase_ : Any = checkpoint["""input_conv.weight_v"""]
lowerCAmelCase_ : Any = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase_ : Tuple = checkpoint[f'upsamples.{i}.1.weight_g']
lowerCAmelCase_ : Any = checkpoint[f'upsamples.{i}.1.weight_v']
lowerCAmelCase_ : int = checkpoint[f'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase_ : Dict = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
lowerCAmelCase_ : Dict = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
lowerCAmelCase_ : Tuple = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
lowerCAmelCase_ : str = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
lowerCAmelCase_ : Optional[Any] = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
lowerCAmelCase_ : str = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
lowerCAmelCase_ : str = checkpoint["""output_conv.1.weight_g"""]
lowerCAmelCase_ : Dict = checkpoint["""output_conv.1.weight_v"""]
lowerCAmelCase_ : Optional[int] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
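# Background note (illustrative): under weight normalisation a weight tensor is
# factored as w = g * v / ||v||, which is why each layer above carries a
# weight_g / weight_v pair; apply_weight_norm() exposes those parameters for
# copying, and remove_weight_norm() folds them back into a single plain weight.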
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : str , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : List[Any]=None , lowercase__ : Union[str, Any]=None , ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
lowerCAmelCase_ : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(lowercase__ )
else:
lowerCAmelCase_ : Any = SpeechTaHifiGanConfig()
lowerCAmelCase_ : str = SpeechTaHifiGan(lowercase__ )
lowerCAmelCase_ : Tuple = torch.load(lowercase__ )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase__ , lowercase__ )
lowerCAmelCase_ : Optional[int] = np.load(lowercase__ )
lowerCAmelCase_ : Any = stats[0].reshape(-1 )
lowerCAmelCase_ : List[str] = stats[1].reshape(-1 )
lowerCAmelCase_ : Optional[int] = torch.from_numpy(lowercase__ ).float()
lowerCAmelCase_ : Any = torch.from_numpy(lowercase__ ).float()
model.save_pretrained(lowercase__ )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28
| 1
|
from math import factorial, pi
def __UpperCamelCase ( lowercase__ : float , lowercase__ : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(lowercase__ , (int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(lowercase__ , lowercase__ ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
lowerCAmelCase_ : Optional[int] = float(lowercase__ )
lowerCAmelCase_ : Union[str, Any] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(lowercase__ ) )
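# The sum above is the Maclaurin expansion
#   sin(theta) = sum_{r>=0} (-1)**r * theta**(2*r + 1) / (2*r + 1)!
# evaluated after reducing theta modulo 2*pi so the truncated series stays well
# conditioned; the cosine variant below sums (-1)**r * theta**(2*r) / (2*r)!.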
def __UpperCamelCase ( lowercase__ : float , lowercase__ : int = 30 ) -> float:
'''simple docstring'''
if not isinstance(lowercase__ , (int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(lowercase__ , lowercase__ ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
lowerCAmelCase_ : int = float(lowercase__ )
lowerCAmelCase_ : Optional[int] = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowercase__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 28
|
def __UpperCamelCase ( lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for ch in input_str:
        lowerCAmelCase_ : Any = ord(ch )
        lowerCAmelCase_ : Dict = pow(2 , ch_unicode )
        # If the bit for this character's code point is already set, it's a duplicate
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
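# The loop above is the classic bitset membership test: bit k of the integer
# records whether code point k has been seen, so a repeated character lands on
# an already-set bit. An equivalent set-based check for comparison (illustrative):
def _sketch_all_unique(input_str: str) -> bool:
    return len(set(input_str)) == len(input_str)

assert _sketch_all_unique("abcd") and not _sketch_all_unique("abca")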
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
| 1
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__UpperCAmelCase = (7_20, 12_80) # Height, Width
__UpperCAmelCase = (0.4, 0.6) # if height or width is lower than this scale, drop it.
__UpperCAmelCase = 1 / 1_00
__UpperCAmelCase = ''
__UpperCAmelCase = ''
__UpperCAmelCase = ''
__UpperCAmelCase = 2_50
def __UpperCamelCase ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = get_dataset(lowercase__ , lowercase__ )
for index in range(lowercase__ ):
lowerCAmelCase_ : str = random.sample(range(len(lowercase__ ) ) , 4 )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = update_image_and_anno(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , filter_scale=lowercase__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase_ : Tuple = random_chars(32 )
lowerCAmelCase_ : Tuple = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
lowerCAmelCase_ : Dict = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , lowercase__ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
lowerCAmelCase_ : List[Any] = []
for anno in new_annos:
lowerCAmelCase_ : Union[str, Any] = anno[3] - anno[1]
lowerCAmelCase_ : List[Any] = anno[4] - anno[2]
lowerCAmelCase_ : Tuple = anno[1] + width / 2
lowerCAmelCase_ : Union[str, Any] = anno[2] + height / 2
lowerCAmelCase_ : Dict = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(lowercase__ )
with open(f'{file_root}.txt' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def __UpperCamelCase ( lowercase__ : str , lowercase__ : str ) -> tuple[list, list]:
'''simple docstring'''
lowerCAmelCase_ : Dict = []
lowerCAmelCase_ : List[str] = []
for label_file in glob.glob(os.path.join(lowercase__ , """*.txt""" ) ):
lowerCAmelCase_ : int = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(lowercase__ ) as in_file:
lowerCAmelCase_ : List[Any] = in_file.readlines()
lowerCAmelCase_ : Dict = os.path.join(lowercase__ , f'{label_name}.jpg' )
lowerCAmelCase_ : Optional[int] = []
for obj_list in obj_lists:
lowerCAmelCase_ : Tuple = obj_list.rstrip("""\n""" ).split(""" """ )
lowerCAmelCase_ : Any = float(obj[1] ) - float(obj[3] ) / 2
lowerCAmelCase_ : int = float(obj[2] ) - float(obj[4] ) / 2
lowerCAmelCase_ : List[Any] = float(obj[1] ) + float(obj[3] ) / 2
lowerCAmelCase_ : Any = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase__ )
labels.append(lowercase__ )
return img_paths, labels
def __UpperCamelCase ( lowercase__ : list , lowercase__ : list , lowercase__ : list[int] , lowercase__ : tuple[int, int] , lowercase__ : tuple[float, float] , lowercase__ : float = 0.0 , ) -> tuple[list, list, str]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowerCAmelCase_ : Dict = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase_ : int = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase_ : Union[str, Any] = int(scale_x * output_size[1] )
lowerCAmelCase_ : Optional[int] = int(scale_y * output_size[0] )
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : Union[str, Any] = []
for i, index in enumerate(lowercase__ ):
lowerCAmelCase_ : Any = all_img_list[index]
path_list.append(lowercase__ )
lowerCAmelCase_ : Union[str, Any] = all_annos[index]
lowerCAmelCase_ : List[str] = cva.imread(lowercase__ )
if i == 0: # top-left
lowerCAmelCase_ : int = cva.resize(lowercase__ , (divid_point_x, divid_point_y) )
lowerCAmelCase_ : Union[str, Any] = img
for bbox in img_annos:
lowerCAmelCase_ : Optional[int] = bbox[1] * scale_x
lowerCAmelCase_ : Optional[int] = bbox[2] * scale_y
lowerCAmelCase_ : int = bbox[3] * scale_x
lowerCAmelCase_ : Union[str, Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowerCAmelCase_ : Union[str, Any] = cva.resize(lowercase__ , (output_size[1] - divid_point_x, divid_point_y) )
lowerCAmelCase_ : str = img
for bbox in img_annos:
lowerCAmelCase_ : str = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase_ : Tuple = bbox[2] * scale_y
lowerCAmelCase_ : List[Any] = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase_ : List[str] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowerCAmelCase_ : str = cva.resize(lowercase__ , (divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase_ : List[str] = img
for bbox in img_annos:
lowerCAmelCase_ : Any = bbox[1] * scale_x
lowerCAmelCase_ : Union[str, Any] = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase_ : List[str] = bbox[3] * scale_x
lowerCAmelCase_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowerCAmelCase_ : Dict = cva.resize(
lowercase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase_ : List[str] = img
for bbox in img_annos:
lowerCAmelCase_ : Any = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase_ : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase_ : Optional[Any] = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase_ : int = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowerCAmelCase_ : Any = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __UpperCamelCase ( lowercase__ : int ) -> str:
    '''Generate a random string of lowercase ASCII letters and digits with length ``number_char``.'''
    assert number_char > 1, "The number of characters should be greater than 1"
lowerCAmelCase_ : int = ascii_lowercase + digits
return "".join(random.choice(lowercase__ ) for _ in range(lowercase__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 28
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__UpperCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
__UpperCAmelCase = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
__UpperCAmelCase = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __a ( __UpperCamelCase ):
__snake_case : List[Any] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
__snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : str = ElectraTokenizer
def __init__( self : List[Any] , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Dict="[UNK]" , UpperCAmelCase : Any="[SEP]" , UpperCAmelCase : Any="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : Optional[Any]="[MASK]" , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Optional[Any] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[Any] = getattr(UpperCAmelCase , normalizer_state.pop("""type""" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : Tuple = strip_accents
lowerCAmelCase_ : Union[str, Any] = tokenize_chinese_chars
lowerCAmelCase_ : int = normalizer_class(**UpperCAmelCase )
lowerCAmelCase_ : str = do_lower_case
def A ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None ):
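        # BERT-style layout: [CLS] A [SEP] for a single sequence and
        # [CLS] A [SEP] B [SEP] when a second sequence is given.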
lowerCAmelCase_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
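        # Segment ids follow the BERT convention: 0 for the first sequence
        # (including [CLS] and its [SEP]) and 1 for the second sequence.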
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
lowerCAmelCase_ : Union[str, Any] = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 28
| 1
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a ( unittest.TestCase ):
def __init__( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str]=3 , UpperCAmelCase : int=32 , UpperCAmelCase : int=3 , UpperCAmelCase : int=10 , UpperCAmelCase : Union[str, Any]=[10, 20, 30, 40] , UpperCAmelCase : Optional[int]=[1, 1, 2, 1] , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : int="relu" , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : List[Any]=None , ):
lowerCAmelCase_ : Optional[Any] = parent
lowerCAmelCase_ : Tuple = batch_size
lowerCAmelCase_ : Any = image_size
lowerCAmelCase_ : List[str] = num_channels
lowerCAmelCase_ : Union[str, Any] = embeddings_size
lowerCAmelCase_ : List[str] = hidden_sizes
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : Optional[int] = is_training
lowerCAmelCase_ : Any = use_labels
lowerCAmelCase_ : Optional[Any] = hidden_act
lowerCAmelCase_ : Optional[int] = num_labels
lowerCAmelCase_ : Union[str, Any] = scope
lowerCAmelCase_ : Optional[Any] = len(UpperCAmelCase )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : List[str] = self.get_config()
return config, pixel_values
def A ( self : Tuple ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def A ( self : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Dict ):
lowerCAmelCase_ : Optional[int] = FlaxRegNetModel(config=UpperCAmelCase )
lowerCAmelCase_ : List[str] = model(UpperCAmelCase )
# Output shape (b, c, h, w)
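        # image_size // 32 matches RegNet's overall stride of 32 (stem plus four stages)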
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A ( self : Any , UpperCAmelCase : Tuple , UpperCAmelCase : str ):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : List[Any] = FlaxRegNetForImageClassification(config=UpperCAmelCase )
lowerCAmelCase_ : Tuple = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Tuple ):
lowerCAmelCase_ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = config_and_inputs
lowerCAmelCase_ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( __UpperCamelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__snake_case : Union[str, Any] = False
__snake_case : Any = False
__snake_case : Tuple = False
def A ( self : Any ):
lowerCAmelCase_ : int = FlaxRegNetModelTester(self )
lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase )
def A ( self : Union[str, Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Optional[Any] ):
return
def A ( self : Any ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : Optional[int] ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def A ( self : Any ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def A ( self : Any ):
pass
def A ( self : Tuple ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[str] = model_class(UpperCAmelCase )
lowerCAmelCase_ : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : Dict = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : Optional[int] ):
def check_hidden_states_output(UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : Union[str, Any] = model_class(UpperCAmelCase )
lowerCAmelCase_ : Tuple = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCAmelCase_ : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Tuple = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : Any = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ : Optional[int] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = model_class(UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ):
return model(pixel_values=UpperCAmelCase , **UpperCAmelCase )
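                # The jitted and eagerly executed forward passes should agree;
                # the loop below compares their output shapes pairwise.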
with self.subTest("""JIT Enabled""" ):
lowerCAmelCase_ : List[str] = model_jitted(**UpperCAmelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCAmelCase_ : List[Any] = model_jitted(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCamelCase ( ) -> Any:
    '''Load the sample COCO image used by the slow integration test below.'''
lowerCAmelCase_ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class __a ( unittest.TestCase ):
@cached_property
def A ( self : str ):
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def A ( self : str ):
lowerCAmelCase_ : Dict = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
lowerCAmelCase_ : int = self.default_image_processor
lowerCAmelCase_ : str = prepare_img()
lowerCAmelCase_ : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="""np""" )
lowerCAmelCase_ : int = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : Optional[int] = (1, 10_00)
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = jnp.array([-0.4180, -1.5051, -3.4836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
| 28
|
from datetime import datetime as dt
import os
from github import Github
__UpperCAmelCase = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def __UpperCamelCase ( ) -> List[Any]:
    '''Close long-inactive issues on huggingface/transformers, or warn that they are stale, skipping exempt labels.'''
lowerCAmelCase_ : Dict = Github(os.environ["""GITHUB_TOKEN"""] )
lowerCAmelCase_ : Tuple = g.get_repo("""huggingface/transformers""" )
lowerCAmelCase_ : Any = repo.get_issues(state="""open""" )
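    # Two-stage policy: issues last touched by the bot and idle for 7+ days are
    # closed; issues idle for 23+ days first receive a stale-warning comment.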
for issue in open_issues:
        lowerCAmelCase_ : Union[str, Any] = sorted([comment for comment in issue.get_comments()] , key=lambda lowercase__ : lowercase__.created_at , reverse=True )
lowerCAmelCase_ : str = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 28
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 28
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __a ( unittest.TestCase ):
def A ( self : List[Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCAmelCase_ : Optional[Any] = Vector()
def A ( self : List[str] ):
lowerCAmelCase_ : Tuple = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCAmelCase ) , """(0,0,0,0,0,1)""" )
def A ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCAmelCase ) , 4 )
def A ( self : Dict ):
lowerCAmelCase_ : Dict = Vector([1, 2] )
lowerCAmelCase_ : str = Vector([1, 2, 3, 4, 5] )
lowerCAmelCase_ : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Dict = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
lowerCAmelCase_ : Optional[int] = Vector([2, -1, 4] ) # for test of dot product
lowerCAmelCase_ : str = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def A ( self : List[str] ):
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def A ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCAmelCase , UpperCAmelCase ) ) , """(3,4,7)""" )
def A ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = Vector([1, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : int = x.copy()
self.assertEqual(str(UpperCAmelCase ) , str(UpperCAmelCase ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCAmelCase ) , """(0,1,0)""" )
def A ( self : Any ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : List[str] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Tuple ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Union[str, Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCAmelCase_ : Any = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def A ( self : Tuple ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def A ( self : Dict ):
lowerCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def A ( self : Optional[int] ):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 28
| 1
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__UpperCAmelCase = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def __UpperCamelCase ( lowercase__ : Tuple ) -> Dict:
    '''Rename keys of a flattened T5X state dict to the Hugging Face SwitchTransformers naming scheme.'''
lowerCAmelCase_ : Optional[Any] = list(s_dict.keys() )
for key in keys:
lowerCAmelCase_ : Tuple = R""".*/layers_(\d+)"""
lowerCAmelCase_ : Tuple = key
if re.match(lowercase__ , lowercase__ ):
lowerCAmelCase_ : Union[str, Any] = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , lowercase__ )
lowerCAmelCase_ : Any = R"""(encoder|decoder)\/"""
if re.match(lowercase__ , lowercase__ ):
lowerCAmelCase_ : List[Any] = re.match(lowercase__ , lowercase__ ).groups()
if groups[0] == "encoder":
lowerCAmelCase_ : Tuple = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , lowercase__ )
lowerCAmelCase_ : Tuple = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , lowercase__ )
elif groups[0] == "decoder":
lowerCAmelCase_ : List[str] = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , lowercase__ )
lowerCAmelCase_ : Optional[int] = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , lowercase__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCAmelCase_ : List[Any] = new_key.replace(lowercase__ , lowercase__ )
print(f'{key} -> {new_key}' )
lowerCAmelCase_ : Dict = s_dict.pop(lowercase__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ : int = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase_ : Any = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCAmelCase_ : str = s_dict[key].shape[0]
lowerCAmelCase_ : List[Any] = s_dict[key]
for idx in range(lowercase__ ):
lowerCAmelCase_ : Optional[Any] = expert_weihts[idx]
                print(f'{key} -> {key.replace("expert/" , f"experts/expert_{idx}/" )}' )
s_dict.pop(lowercase__ )
return s_dict
__UpperCAmelCase = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Dict ) -> int:
    '''Parse a gin config file and build a SwitchTransformersConfig from its parameters.'''
import regex as re
with open(lowercase__ , """r""" ) as f:
lowerCAmelCase_ : Optional[Any] = f.read()
lowerCAmelCase_ : Tuple = re.findall(R"""(.*) = ([0-9.]*)""" , lowercase__ )
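    # Each "<param> = <value>" line becomes a config entry when <param> is a known
    # key; values containing "." are parsed as floats, everything else as ints.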
lowerCAmelCase_ : Optional[int] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCAmelCase_ : Any = float(lowercase__ ) if """.""" in value else int(lowercase__ )
lowerCAmelCase_ : Optional[int] = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , lowercase__ )[0]
lowerCAmelCase_ : Optional[Any] = str(activation[1] )
lowerCAmelCase_ : int = num_experts
lowerCAmelCase_ : Optional[int] = SwitchTransformersConfig(**lowercase__ )
return config
def __UpperCamelCase ( lowercase__ : Tuple , lowercase__ : List[Any] , lowercase__ : Dict=None , lowercase__ : Any="./" , lowercase__ : List[Any]=8 ) -> List[Any]:
    '''Load a T5X/Flax SwitchTransformers checkpoint, port it to PyTorch, and save the result.'''
print(f'Loading flax weights from : {flax_checkpoint_path}' )
lowerCAmelCase_ : Optional[int] = checkpoints.load_tax_checkpoint(lowercase__ )
if gin_file is not None:
lowerCAmelCase_ : Any = convert_gin_to_config(lowercase__ , lowercase__ )
else:
lowerCAmelCase_ : Any = SwitchTransformersConfig.from_pretrained(lowercase__ )
lowerCAmelCase_ : List[str] = SwitchTransformersForConditionalGeneration(lowercase__ )
lowerCAmelCase_ : str = flax_params["""target"""]
lowerCAmelCase_ : int = flatten_dict(lowercase__ , sep="""/""" )
lowerCAmelCase_ : int = rename_keys(lowercase__ )
lowerCAmelCase_ : int = unflatten_dict(lowercase__ , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase__ , lowercase__ )
print(f'Save PyTorch model to {pytorch_dump_path}' )
pt_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
            'Path to the T5X SwitchTransformers checkpoint to convert. \nIf `--config_name` is not provided, a'
            ' `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
        help='Path to the gin config file. If not provided, a `config_name` has to be passed.',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
__UpperCAmelCase = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 28
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __a ( __UpperCamelCase ,__UpperCamelCase ):
__snake_case : Union[str, Any] = """pixel_values"""
__snake_case : Optional[Any] = False
__snake_case : Dict = TimmBackboneConfig
def __init__( self : List[str] , UpperCAmelCase : int , **UpperCAmelCase : List[str] ):
requires_backends(self , """timm""" )
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(UpperCAmelCase , """out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
lowerCAmelCase_ : List[str] = getattr(UpperCAmelCase , """use_pretrained_backbone""" , UpperCAmelCase )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCAmelCase_ : str = config.out_indices if getattr(UpperCAmelCase , """out_indices""" , UpperCAmelCase ) is not None else (-1,)
lowerCAmelCase_ : Optional[int] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCAmelCase_ : Union[str, Any] = self._backbone.return_layers
lowerCAmelCase_ : Dict = {layer["""module"""]: str(UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def A ( cls : Dict , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""config""" , TimmBackboneConfig() )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""use_timm_backbone""" , UpperCAmelCase )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels )
lowerCAmelCase_ : Tuple = kwargs.pop("""features_only""" , config.features_only )
lowerCAmelCase_ : List[str] = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""out_indices""" , config.out_indices )
lowerCAmelCase_ : Optional[Any] = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
pass
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : Any ):
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase_ : Optional[Any] = self._all_layers
lowerCAmelCase_ : List[Any] = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = self._return_layers
lowerCAmelCase_ : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase_ : Tuple = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[str] = tuple(UpperCAmelCase )
lowerCAmelCase_ : int = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase_ : Optional[Any] = (feature_maps,)
if output_hidden_states:
lowerCAmelCase_ : Tuple = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
| 28
| 1
|
def __UpperCamelCase ( lowercase__ : list ) -> list:
    '''Sort ``collection`` with insertion sort, using binary search to locate each insertion point.'''
lowerCAmelCase_ : Any = len(lowercase__ )
for i in range(1 , lowercase__ ):
lowerCAmelCase_ : List[str] = collection[i]
lowerCAmelCase_ : List[str] = 0
lowerCAmelCase_ : Dict = i - 1
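        # Binary-search [low, high] for the index where val should be inserted.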
while low <= high:
lowerCAmelCase_ : Optional[Any] = (low + high) // 2
if val < collection[mid]:
lowerCAmelCase_ : Optional[int] = mid - 1
else:
lowerCAmelCase_ : List[Any] = mid + 1
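        # Shift elements right of the insertion point one slot, then place val.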
for j in range(lowercase__ , lowercase__ , -1 ):
lowerCAmelCase_ : List[Any] = collection[j - 1]
lowerCAmelCase_ : List[Any] = val
return collection
if __name__ == "__main__":
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
| 28
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class __a ( __UpperCamelCase ):
__snake_case : Optional[Any] = """mra"""
def __init__( self : List[str] , UpperCAmelCase : Tuple=5_02_65 , UpperCAmelCase : str=7_68 , UpperCAmelCase : int=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Tuple=30_72 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[str]=5_12 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : int=1e-5 , UpperCAmelCase : Optional[int]="absolute" , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Any="full" , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Any=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , **UpperCAmelCase : Tuple , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[int] = position_embedding_type
lowerCAmelCase_ : Any = block_per_row
lowerCAmelCase_ : int = approx_mode
lowerCAmelCase_ : Union[str, Any] = initial_prior_first_n_blocks
lowerCAmelCase_ : Dict = initial_prior_diagonal_n_blocks
| 28
| 1
|
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__UpperCAmelCase = random.Random()
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : Optional[int]=1.0 , lowercase__ : int=None , lowercase__ : Tuple=None ) -> Tuple:
    '''Build a nested list of random floats of the given 2-D shape, each value scaled by ``scale``.'''
if rng is None:
lowerCAmelCase_ : int = global_rng
lowerCAmelCase_ : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __a ( unittest.TestCase ):
def __init__( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any]=7 , UpperCAmelCase : Optional[Any]=4_00 , UpperCAmelCase : Union[str, Any]=20_00 , UpperCAmelCase : str=1 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : List[str]=1_60_00 , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[str]=True , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : int = batch_size
lowerCAmelCase_ : List[str] = min_seq_length
lowerCAmelCase_ : int = max_seq_length
lowerCAmelCase_ : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
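        # Each successive batch element grows by seq_length_diff samples, spreading
        # lengths evenly between min_seq_length and max_seq_length.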
lowerCAmelCase_ : Optional[int] = feature_size
lowerCAmelCase_ : Optional[int] = padding_value
lowerCAmelCase_ : List[Any] = sampling_rate
lowerCAmelCase_ : Tuple = return_attention_mask
lowerCAmelCase_ : List[Any] = do_normalize
def A ( self : Optional[Any] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def A ( self : str , UpperCAmelCase : List[Any]=False , UpperCAmelCase : List[Any]=False ):
def _flatten(UpperCAmelCase : int ):
return list(itertools.chain(*UpperCAmelCase ) )
if equal_length:
lowerCAmelCase_ : str = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase_ : Union[str, Any] = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase_ : Optional[int] = [np.asarray(UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
class __a ( __UpperCamelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = WavaVecaFeatureExtractor
def A ( self : Dict ):
lowerCAmelCase_ : str = WavaVecaFeatureExtractionTester(self )
def A ( self : str , UpperCAmelCase : Dict ):
self.assertTrue(np.all(np.mean(UpperCAmelCase , axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase , axis=0 ) - 1 ) < 1e-3 ) )
def A ( self : Dict ):
        # Tests that all calls are routed through encode_plus and batch_encode_plus
lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : List[Any] = [np.asarray(UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase_ : List[str] = feat_extract(speech_inputs[0] , return_tensors="""np""" ).input_values
lowerCAmelCase_ : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="""np""" ).input_values
self.assertTrue(np.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-3 ) )
# Test batched
lowerCAmelCase_ : str = feat_extract(UpperCAmelCase , return_tensors="""np""" ).input_values
lowerCAmelCase_ : Any = feat_extract(UpperCAmelCase , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_b in zip(UpperCAmelCase , UpperCAmelCase ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase_ : Any = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
lowerCAmelCase_ : Union[str, Any] = np.asarray(UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = feat_extract(UpperCAmelCase , return_tensors="""np""" ).input_values
lowerCAmelCase_ : List[Any] = feat_extract(UpperCAmelCase , return_tensors="""np""" ).input_values
        for enc_seq_a, enc_seq_b in zip(UpperCAmelCase , UpperCAmelCase ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1e-3 ) )
def A ( self : Any ):
lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : List[str] = ["""longest""", """max_length""", """do_not_pad"""]
lowerCAmelCase_ : Optional[Any] = [None, 16_00, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : Any = feat_extract(UpperCAmelCase , padding=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors="""np""" )
lowerCAmelCase_ : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def A ( self : int ):
lowerCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Dict = range(8_00 , 14_00 , 2_00 )
lowerCAmelCase_ : Union[str, Any] = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase_ : Optional[int] = ["""longest""", """max_length""", """do_not_pad"""]
lowerCAmelCase_ : Tuple = [None, 16_00, None]
for max_length, padding in zip(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : int = feat_extract(UpperCAmelCase , max_length=UpperCAmelCase , padding=UpperCAmelCase )
lowerCAmelCase_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def A ( self : Any ):
lowerCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : List[str] = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=10_00 , padding="""max_length""" , return_tensors="""np""" )
lowerCAmelCase_ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def A ( self : Dict ):
lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : List[str] = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=10_00 , padding="""longest""" , return_tensors="""np""" )
lowerCAmelCase_ : List[str] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
lowerCAmelCase_ : str = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
lowerCAmelCase_ : Any = feat_extract(
UpperCAmelCase , truncation=UpperCAmelCase , max_length=20_00 , padding="""longest""" , return_tensors="""np""" )
lowerCAmelCase_ : Tuple = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
@require_torch
def A ( self : str ):
import torch
lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase_ : Optional[Any] = np.random.rand(1_00 ).astype(np.floataa )
lowerCAmelCase_ : Any = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase_ : Optional[int] = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase_ : str = feature_extractor.pad([{"""input_values""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def A ( self : Tuple ):
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCAmelCase_ : Union[str, Any] = WavaVecaConfig.from_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == """layer""" )
| 28
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def __UpperCamelCase ( lowercase__ : int ) -> str:
    '''Return pi to ``precision`` digits, computed with the Chudnovsky algorithm.'''
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
lowerCAmelCase_ : Any = precision
lowerCAmelCase_ : Any = ceil(precision / 14 )
lowerCAmelCase_ : Optional[Any] = 426880 * Decimal(10005 ).sqrt()
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : Optional[int] = 13591409
lowerCAmelCase_ : Union[str, Any] = Decimal(lowercase__ )
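    # Chudnovsky series: pi = 426880*sqrt(10005) / sum_k (multinomial_k * linear_k / exponential_k)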
for k in range(1 , lowercase__ ):
lowerCAmelCase_ : Optional[Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase__ ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__UpperCAmelCase = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 28
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
def __init__( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : float , **UpperCAmelCase : int ):
lowerCAmelCase_ : List[Any] = feature_size
lowerCAmelCase_ : Dict = sampling_rate
lowerCAmelCase_ : str = padding_value
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""padding_side""" , """right""" )
lowerCAmelCase_ : Any = kwargs.pop("""return_attention_mask""" , UpperCAmelCase )
super().__init__(**UpperCAmelCase )
def A ( self : int , UpperCAmelCase : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , UpperCAmelCase : Union[bool, str, PaddingStrategy] = True , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(UpperCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowerCAmelCase_ : List[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
lowerCAmelCase_ : List[Any] = processed_features[self.model_input_names[0]]
lowerCAmelCase_ : Union[str, Any] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(UpperCAmelCase ) == 0:
if return_attention_mask:
lowerCAmelCase_ : Any = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowerCAmelCase_ : Dict = required_input[0]
if isinstance(UpperCAmelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowerCAmelCase_ : str = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(UpperCAmelCase ):
lowerCAmelCase_ : Union[str, Any] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(UpperCAmelCase ):
lowerCAmelCase_ : List[str] = """tf"""
elif is_torch_tensor(UpperCAmelCase ):
lowerCAmelCase_ : Tuple = """pt"""
elif isinstance(UpperCAmelCase , (int, float, list, tuple, np.ndarray) ):
lowerCAmelCase_ : Optional[Any] = """np"""
else:
raise ValueError(
F'type of {first_element} unknown: {type(UpperCAmelCase )}. '
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowerCAmelCase_ : Union[str, Any] = to_numpy(UpperCAmelCase )
else:
lowerCAmelCase_ : Union[str, Any] = [to_numpy(UpperCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowerCAmelCase_ : str = self._get_padding_strategies(padding=UpperCAmelCase , max_length=UpperCAmelCase )
lowerCAmelCase_ : str = processed_features[self.model_input_names[0]]
lowerCAmelCase_ : List[Any] = len(UpperCAmelCase )
if not all(len(UpperCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
lowerCAmelCase_ : int = []
for i in range(UpperCAmelCase ):
lowerCAmelCase_ : Any = {k: v[i] for k, v in processed_features.items()}
# truncation
lowerCAmelCase_ : str = self._truncate(
UpperCAmelCase , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , truncation=UpperCAmelCase , )
truncated_inputs.append(UpperCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowerCAmelCase_ : Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowerCAmelCase_ : str = PaddingStrategy.MAX_LENGTH
lowerCAmelCase_ : Optional[int] = {}
for i in range(UpperCAmelCase ):
# padding
lowerCAmelCase_ : List[Any] = self._pad(
truncated_inputs[i] , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
lowerCAmelCase_ : List[Any] = []
if value.dtype is np.dtype(np.floataa ):
lowerCAmelCase_ : List[str] = value.astype(np.floataa )
batch_outputs[key].append(UpperCAmelCase )
return BatchFeature(UpperCAmelCase , tensor_type=UpperCAmelCase )
def A ( self : Any , UpperCAmelCase : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ):
lowerCAmelCase_ : Any = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
lowerCAmelCase_ : Optional[int] = len(UpperCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCAmelCase_ : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
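            # i.e. max_length rounded up to the next multiple of pad_to_multiple_of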
lowerCAmelCase_ : Tuple = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
lowerCAmelCase_ : List[str] = np.ones(len(UpperCAmelCase ) , dtype=np.intaa )
if needs_to_be_padded:
lowerCAmelCase_ : Optional[int] = max_length - len(UpperCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
lowerCAmelCase_ : List[str] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
lowerCAmelCase_ : int = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
lowerCAmelCase_ : Any = np.pad(
UpperCAmelCase , UpperCAmelCase , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
lowerCAmelCase_ : Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
lowerCAmelCase_ : Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
lowerCAmelCase_ : Dict = np.pad(
UpperCAmelCase , UpperCAmelCase , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def A ( self : List[Any] , UpperCAmelCase : Union[Dict[str, np.ndarray], BatchFeature] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
lowerCAmelCase_ : Any = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
lowerCAmelCase_ : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
lowerCAmelCase_ : Optional[Any] = len(UpperCAmelCase ) > max_length
if needs_to_be_truncated:
lowerCAmelCase_ : str = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
lowerCAmelCase_ : Optional[Any] = processed_features["""attention_mask"""][:max_length]
return processed_features
def A ( self : Optional[Any] , UpperCAmelCase : str=False , UpperCAmelCase : str=None ):
# Get padding strategy
if padding is not False:
if padding is True:
lowerCAmelCase_ : str = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : Any = PaddingStrategy(UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
lowerCAmelCase_ : str = padding
else:
lowerCAmelCase_ : Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 28
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
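# Short usage sketch (added for illustration): the `attribute_map` above lets
# library-generic attribute names resolve to GPT-J's native config fields.
if __name__ == "__main__":
    tiny_config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)
    assert tiny_config.num_hidden_layers == 2  # aliased to n_layer
    assert tiny_config.hidden_size == tiny_config.n_embd  # aliased to n_embd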
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)],
                dim=1,
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
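# Sketch (added for illustration): generating dummy export inputs with the
# past-key-values branch enabled. The GPT-2 tokenizer is assumed as a stand-in
# since GPT-J reuses its byte-level BPE vocabulary; requires torch installed.
if __name__ == "__main__":
    from transformers import GPT2Tokenizer

    onnx_config = GPTJOnnxConfig(GPTJConfig(n_layer=2, n_head=4, n_embd=64), use_past=True)
    tok = GPT2Tokenizer.from_pretrained("gpt2")
    dummy = onnx_config.generate_dummy_inputs(tok, batch_size=2, seq_length=8, framework=TensorType.PYTORCH)
    print(list(dummy))  # ["input_ids", "past_key_values", "attention_mask"]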
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
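# Minimal sketch (added; a simplified stand-in for `transformers.utils._LazyModule`):
# the module object in sys.modules is swapped for a proxy whose __getattr__ performs
# the real import only on first attribute access, so `import` stays cheap.
import importlib
from types import ModuleType


class TinyLazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure  # {submodule name: [exported symbols]}

    def __getattr__(self, attr: str):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")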
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range, return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Tuple = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__snake_case : List[str] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__snake_case : Optional[Any] = False
__snake_case : Any = False
__snake_case : Optional[int] = False
def A ( self : Any ):
lowerCAmelCase_ : List[str] = TFConvBertModelTester(self )
lowerCAmelCase_ : str = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A ( self : int ):
self.config_tester.run_common_tests()
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : List[Any] ):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A ( self : Optional[int] ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A ( self : int ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : List[str] = True
lowerCAmelCase_ : List[str] = True
if hasattr(UpperCAmelCase , """use_cache""" ):
lowerCAmelCase_ : str = True
lowerCAmelCase_ : int = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCAmelCase_ : Optional[Any] = getattr(self.model_tester , """key_length""" , UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Dict = model_class(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = len(model(UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase , saved_model=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = os.path.join(UpperCAmelCase , """saved_model""" , """1""" )
lowerCAmelCase_ : Dict = tf.keras.models.load_model(UpperCAmelCase )
lowerCAmelCase_ : Any = model(UpperCAmelCase )
if self.is_encoder_decoder:
lowerCAmelCase_ : str = outputs["""encoder_hidden_states"""]
lowerCAmelCase_ : List[str] = outputs["""encoder_attentions"""]
else:
lowerCAmelCase_ : Union[str, Any] = outputs["""hidden_states"""]
lowerCAmelCase_ : Optional[int] = outputs["""attentions"""]
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowerCAmelCase_ : Dict = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def A ( self : int ):
lowerCAmelCase_ : str = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(UpperCAmelCase )
def A ( self : int ):
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : Optional[int] = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
lowerCAmelCase_ : List[str] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
lowerCAmelCase_ : Any = getattr(self.model_tester , """key_length""" , UpperCAmelCase )
lowerCAmelCase_ : Any = getattr(self.model_tester , """key_length""" , UpperCAmelCase )
def check_decoder_attentions_output(UpperCAmelCase : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = len(UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
lowerCAmelCase_ : int = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(UpperCAmelCase : Optional[int] ):
lowerCAmelCase_ : Tuple = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Union[str, Any] = model_class(UpperCAmelCase )
lowerCAmelCase_ : Any = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCAmelCase_ : List[str] = len(UpperCAmelCase )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
if self.is_encoder_decoder:
lowerCAmelCase_ : Dict = model_class(UpperCAmelCase )
lowerCAmelCase_ : Dict = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_decoder_attentions_output(UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : List[Any] = model_class(UpperCAmelCase )
lowerCAmelCase_ : Any = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
# Check attention is always last and order is fine
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Optional[Any] = model_class(UpperCAmelCase )
lowerCAmelCase_ : Any = model(self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase )
check_encoder_attentions_output(UpperCAmelCase )
@require_tf
class __a ( unittest.TestCase ):
@slow
def A ( self : List[Any] ):
lowerCAmelCase_ : List[str] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
lowerCAmelCase_ : Optional[int] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase )[0]
lowerCAmelCase_ : str = [1, 6, 7_68]
self.assertEqual(output.shape , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False,
            bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
def A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : str = 20
lowerCAmelCase_ : Dict = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowerCAmelCase_ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowerCAmelCase_ : Tuple = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase_ : Dict = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : List[str] = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Any = model(UpperCAmelCase )
lowerCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def A ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any ):
lowerCAmelCase_ : int = 20
lowerCAmelCase_ : List[Any] = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : Tuple = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowerCAmelCase_ : Dict = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase_ : Tuple = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowerCAmelCase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
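# Sketch (added for illustration): the caching contract the two checks above
# exercise, against a tiny randomly initialised model. All config values below
# are small arbitrary assumptions; requires flax to be installed.
def _kv_cache_sketch():
    model = FlaxGPTJForCausalLM(
        GPTJConfig(vocab_size=99, n_positions=16, n_embd=32, n_layer=2, n_head=4, rotary_dim=4)
    )
    ids = jnp.ones((1, 4), dtype="i4")
    mask = jnp.ones((1, 8), dtype="i4")
    cache = model.init_cache(1, 8)  # pre-allocated past_key_values for up to 8 positions
    prefill = model(ids, attention_mask=mask, past_key_values=cache, position_ids=jnp.arange(4)[None, :])
    step = model(
        ids[:, -1:],
        attention_mask=mask,
        past_key_values=prefill.past_key_values,  # reuse cached keys/values, one token per call
        position_ids=jnp.array([[4]], dtype="i4"),
    )
    return step.logits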
@require_flax
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__snake_case : Any = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def A ( self : Any ):
lowerCAmelCase_ : List[str] = FlaxGPTJModelTester(self )
def A ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Tuple ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@tooslow
def A ( self : int ):
lowerCAmelCase_ : Optional[int] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
lowerCAmelCase_ : Tuple = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase , truncation=UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Optional[Any] = model.config.eos_token_id
lowerCAmelCase_ : List[Any] = jax.jit(model.generate )
lowerCAmelCase_ : Any = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCAmelCase_ : str = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
@is_pt_flax_cross_test
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase_ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase_ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase_ : Dict = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = pt_inputs["""input_ids"""].shape
lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Any = 1
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Tuple = pt_model_class(UpperCAmelCase ).eval()
lowerCAmelCase_ : List[str] = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowerCAmelCase_ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase )
lowerCAmelCase_ : List[str] = fx_state
with torch.no_grad():
lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCAmelCase_ : int = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model_class.from_pretrained(UpperCAmelCase , from_pt=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = fx_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase_ : str = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase_ : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase_ : Any = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = pt_model_class(UpperCAmelCase ).eval()
lowerCAmelCase_ : Any = model_class(UpperCAmelCase , dtype=jnp.floataa )
lowerCAmelCase_ : Union[str, Any] = load_flax_weights_in_pytorch_model(UpperCAmelCase , fx_model.params )
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = pt_inputs["""input_ids"""].shape
lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCAmelCase_ : Tuple = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = pt_model_class.from_pretrained(UpperCAmelCase , from_flax=UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ : Dict = pt_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def A ( self : str ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ : Optional[Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
lowerCAmelCase_ : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range,
        )
def A ( self : Any , UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : Any ):
lowerCAmelCase_ : Union[str, Any] = DistilBertModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : str = model(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : int = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : int ):
lowerCAmelCase_ : Dict = DistilBertForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : Optional[Any] = DistilBertForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : List[Any] = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict ):
lowerCAmelCase_ : Tuple = self.num_labels
lowerCAmelCase_ : Any = DistilBertForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any ):
lowerCAmelCase_ : str = self.num_labels
lowerCAmelCase_ : List[str] = DistilBertForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Dict , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] ):
lowerCAmelCase_ : List[Any] = self.num_choices
lowerCAmelCase_ : List[str] = DistilBertForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Dict = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Optional[int] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
__snake_case : Optional[Any] = (
{
"""feature-extraction""": DistilBertModel,
"""fill-mask""": DistilBertForMaskedLM,
"""question-answering""": DistilBertForQuestionAnswering,
"""text-classification""": DistilBertForSequenceClassification,
"""token-classification""": DistilBertForTokenClassification,
"""zero-shot""": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : Dict = True
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = True
__snake_case : int = True
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : str = DistilBertModelTester(self )
lowerCAmelCase_ : List[str] = ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def A ( self : Dict ):
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A ( self : List[Any] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A ( self : List[Any] ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A ( self : int ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
@slow
def A ( self : Optional[Any] ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : str = DistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@slow
@require_torch_gpu
def A ( self : Union[str, Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowerCAmelCase_ : int = True
lowerCAmelCase_ : Union[str, Any] = model_class(config=UpperCAmelCase )
lowerCAmelCase_ : Any = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[str] = torch.jit.trace(
UpperCAmelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCAmelCase , os.path.join(UpperCAmelCase , """traced_model.pt""" ) )
lowerCAmelCase_ : int = torch.jit.load(os.path.join(UpperCAmelCase , """traced_model.pt""" ) , map_location=UpperCAmelCase )
loaded(inputs_dict["""input_ids"""].to(UpperCAmelCase ) , inputs_dict["""attention_mask"""].to(UpperCAmelCase ) )
@require_torch
class __a ( unittest.TestCase ):
@slow
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
lowerCAmelCase_ : Optional[int] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
lowerCAmelCase_ : Dict = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ : List[str] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )[0]
lowerCAmelCase_ : Optional[int] = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCAmelCase , atol=1e-4 ) )
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float,
             sample_hat: torch.FloatTensor, return_dict: bool = True) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float,
                     sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor,
                     derivative: torch.FloatTensor, return_dict: bool = True) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
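# Sketch (added): the stochastic sampling loop this scheduler is built for,
# following Algorithm 2 of Karras et al. (2022). `toy_model` stands in for a
# (sample, sigma) -> prediction UNet and is purely an assumption here.
def _karras_ve_sampling_sketch(shape=(1, 3, 8, 8), num_inference_steps=10):
    def toy_model(sample, sigma):  # hypothetical denoiser stand-in
        return sample / (sigma**2 + 1)

    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    sample = randn_tensor(shape) * scheduler.init_noise_sigma
    for i in range(num_inference_steps):
        sigma = scheduler.schedule[i]
        sigma_prev = scheduler.schedule[i + 1] if i + 1 < num_inference_steps else 0
        # 1. temporarily increase noise, 2. evaluate the model at the noised sample,
        # 3. take the Euler step, 4. apply the 2nd-order correction unless sigma_prev == 0
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        out = scheduler.step(toy_model(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
        if sigma_prev != 0:
            out = scheduler.step_correct(
                toy_model(out.prev_sample, sigma_prev), sigma_hat, sigma_prev,
                sample_hat, out.prev_sample, out.derivative,
            )
        sample = out.prev_sample
    return sample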
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseDiffusersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
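# Minimal sketch (added): what a concrete command looks like against this
# interface; the `hello` subcommand is hypothetical.
class HelloCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        sub = parser.add_parser("hello")
        sub.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from diffusers-cli")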
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
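# Usage sketch (added): the queue reuses a fixed ring of nodes and wraps around.
if __name__ == "__main__":
    queue = CircularQueueLinkedList(initial_capacity=3)
    queue.enqueue("a")
    queue.enqueue("b")
    print(queue.first())    # a
    print(queue.dequeue())  # a
    print(queue.dequeue())  # b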
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__UpperCAmelCase = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
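# Tiny illustration (added): the helpers above combine to fit y = a * x + b.
# Plain PyTorch here; the tests below drive the same loop through `accelerate`.
def _fit_dummy_model_sketch(steps=200):
    model = DummyModel()
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    for _ in range(steps):
        x = torch.randn(32, 1)
        y = 2 * x + 3
        loss = torch.nn.functional.mse_loss(model(x), y)
        opt.zero_grad()
        loss.backward()
        opt.step()
    return model  # model.a is near 2 and model.b near 3 after convergence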
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2, only the two most recent checkpoints survive:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
@require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
        param_device = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the (key, output, query, value) attention kernels for block i."""
    k = params[f'{prefix}/layers_{i}/{layer_name}/key/kernel']
    o = params[f'{prefix}/layers_{i}/{layer_name}/out/kernel']
    q = params[f'{prefix}/layers_{i}/{layer_name}/query/kernel']
    v = params[f'{prefix}/layers_{i}/{layer_name}/value/kernel']
    return k, o, q, v
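# For reference: with prefix="encoder", i=0 and the default layer_name, the keys
# read here look like "encoder/layers_0/attention/key/kernel" in the flattened
# T5X parameter dict produced below by traverse_util.flatten_dict.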
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP kernels for block i; wi is a pair for gated (v1.1) MLPs."""
    if split_mlp_wi:
        wi_0 = params[f'{prefix}/layers_{i}/mlp/wi_0/kernel']
        wi_1 = params[f'{prefix}/layers_{i}/mlp/wi_1/kernel']
        wi = (wi_0, wi_1)
    else:
        wi = params[f'{prefix}/layers_{i}/mlp/wi/kernel']
    wo = params[f'{prefix}/layers_{i}/mlp/wo/kernel']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm scale parameter for block i."""
    return params[f'{prefix}/layers_{i}/{layer_name}/scale']
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts the flattened T5X parameters into the transformers T5 naming scheme."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T
    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the model's parameters with those from the T5X checkpoint."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = T5Config.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = T5EncoderModel(config)
    else:
        model = T5ForConditionalGeneration(config)
    # Load weights from the T5X checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
        '--is_encoder_only', action='store_true', help='Whether the checkpoint is an encoder-only model.', default=False
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
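# Hypothetical invocation (script name and paths are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output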
# fmt: off
MORSE_CODE_DICT = {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
'2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
'8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
'?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
'(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    """
    >>> encrypt("Sos!")
    '... --- ... -.-.--'
    """
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())
def decrypt(message: str) -> str:
    """
    >>> decrypt('... --- ... -.-.--')
    'SOS!'
    """
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    """Lists the (old, new) key pairs mapping DINO/timm names to HF ViT names."""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
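# For example, for i=0 this produces pairs such as
# ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight").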
def read_in_q_k_v(state_dict, config, base_model=False):
    """Splits each fused timm qkv projection into separate HF query/key/value weights."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'blocks.{i}.attn.qkv.weight')
        in_proj_bias = state_dict.pop(f'blocks.{i}.attn.qkv.bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.query.bias'] = in_proj_bias[: config.hidden_size]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[config.hidden_size : config.hidden_size * 2, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.key.bias'] = in_proj_bias[config.hidden_size : config.hidden_size * 2]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f'{prefix}encoder.layer.{i}.attention.attention.value.bias'] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """Copies/pastes/tweaks a DINO checkpoint into the HF ViT structure."""
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the text index of the rightmost mismatch when the pattern is
        aligned at `current_pos`, or -1 if the window matches completely."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
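# Note: reassigning the loop variable `i` inside a `for` loop does not skip ahead
# in Python, so this implementation effectively checks every alignment; with the
# text and pattern below the expected positions are [0, 3].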
text = 'ABAABA'
pattern = 'AB'
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Finds the value of sin(theta) from the Maclaurin series with `accuracy`
    terms, after reducing theta to the range [0, 2*pi)."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Finds the value of cos(theta) from the Maclaurin series with `accuracy`
    terms, after reducing theta to the range [0, 2*pi)."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
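# Hypothetical sanity check against the standard library (not part of the demo below):
#   from math import sin, cos
#   assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
#   assert abs(maclaurin_cos(5) - cos(5)) < 1e-9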
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
import os
def solution(filename: str = "input.txt") -> int:
    """Finds the minimal path sum from the left column to the right column of the
    matrix in `filename`, moving up, down, or right."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
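# Worked example (hypothetical input): for the matrix [[1, 9], [3, 1]] the answer
# is 4 via the path 3 -> 1 (start in the second row, move right), beating
# 1 -> 9 = 10 and the down-then-right path 1 -> 3 -> 1 = 5.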
if __name__ == "__main__":
print(f"""{solution() = }""")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class __a(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang
        )
    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
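# Hypothetical usage sketch (the tool class above follows the PipelineTool protocol,
# so calling it runs encode -> forward -> decode):
#   tool = __a()
#   print(tool("Bonjour, comment allez-vous?", src_lang="French", tgt_lang="English"))
# Language names are mapped to NLLB codes, the tokenizer builds the translation
# inputs, and the seq2seq model generates the output text.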
from __future__ import annotations
sieve = [True] * 1_00_00_01
i = 2
while i * i <= 1_00_00_00:
    if sieve[i]:
        for j in range(i * i, 1_00_00_01, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """Returns True if n is prime, via the precomputed sieve."""
    return sieve[n]
def contains_an_even_digit(n: int) -> bool:
    """Returns True if n contains an even digit."""
    return any(digit in "02468" for digit in str(n))
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Returns the circular primes below `limit`: primes all of whose digit
    rotations are also prime."""
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    """Counts the circular primes below one million."""
    return len(find_circular_primes())
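# For example, 197 is circular because 197, 971, and 719 are all prime. Primes with
# an even digit (other than 2 itself) are skipped up front, since some rotation
# would end in that digit and be divisible by 2.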
if __name__ == "__main__":
print(f"""{len(find_circular_primes()) = }""")
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def get_config(model_name):
    """Builds a BitConfig with ImageNet-1k labels for the given timm model name."""
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id,
    )
    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
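# For example: "stem.conv.weight" -> "bit.embedder.convolution.weight" and
# "stages.0.blocks.0.conv1.weight" -> "bit.encoder.stages.0.layers.0.conv1.weight".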
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copies/pastes/tweaks a timm BiT checkpoint into the HF BiT structure."""
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}')
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f'Pushing model {model_name} and processor to the hub')
        model.push_to_hub(f'ybelkada/{model_name}')
        processor.push_to_hub(f'ybelkada/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
def is_unique_chars(input_str: str) -> bool:
    """Checks whether all characters in `input_str` are distinct, using one big
    integer as a bitmap indexed by each character's Unicode code point.

    >>> is_unique_chars("abcdef")
    True
    >>> is_unique_chars("aA")
    True
    >>> is_unique_chars("abca")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
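        # e.g. with the default image_size=64 the backbone feature map is 2x2, so
        # seq_length = 4 patches + 1 [CLS] token = 5.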
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False,
            initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __a(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
@slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
import torch
from torch import nn
class __a ( nn.Module ):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
def A ( self : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Any ):
if proj is None:
lowerCAmelCase_ : List[Any] = nn.functional.linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
lowerCAmelCase_ : int = nn.functional.linear(UpperCAmelCase , proj.t().contiguous() )
lowerCAmelCase_ : Tuple = nn.functional.linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
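    # Forward pass: when `labels` are given, hidden states are shifted so tokens < n
    # predict token n and the per-token negative log-likelihood is returned; without
    # labels, the full log-softmax over the vocabulary is returned instead.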
def A ( self : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=False ):
if labels is not None:
# Shift so that tokens < n predict n
lowerCAmelCase_ : int = hidden[..., :-1, :].contiguous()
lowerCAmelCase_ : int = labels[..., 1:].contiguous()
lowerCAmelCase_ : Optional[int] = hidden.view(-1 , hidden.size(-1 ) )
lowerCAmelCase_ : Union[str, Any] = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
lowerCAmelCase_ : int = hidden.view(-1 , hidden.size(-1 ) )
if self.n_clusters == 0:
lowerCAmelCase_ : Union[str, Any] = self._compute_logit(UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
if labels is not None:
lowerCAmelCase_ : Union[str, Any] = labels != -1_00
lowerCAmelCase_ : Optional[Any] = torch.zeros_like(UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase_ : Dict = (
-nn.functional.log_softmax(UpperCAmelCase , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
lowerCAmelCase_ : int = nn.functional.log_softmax(UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase_ , lowerCAmelCase_ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase_ : Optional[Any] = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase_ : Tuple = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase_ : List[str] = self.out_layers[i].weight
lowerCAmelCase_ : Tuple = self.out_layers[i].bias
if i == 0:
lowerCAmelCase_ : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase_ : str = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCAmelCase )
biases.append(UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Any = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase_ : Optional[Any] = self._compute_logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Dict = nn.functional.log_softmax(UpperCAmelCase , dim=1 )
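                # The head softmax covers the shortlist tokens plus one entry per tail
                # cluster; a tail token's log-probability is chained as
                # log p(cluster | head) + log p(token | cluster).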
if labels is None:
lowerCAmelCase_ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
lowerCAmelCase_ : Dict = torch.zeros_like(UpperCAmelCase , dtype=hidden.dtype , device=hidden.device )
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Tuple = [0] + self.cutoffs
for i in range(len(UpperCAmelCase ) - 1 ):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
lowerCAmelCase_ : Union[str, Any] = (labels >= l_idx) & (labels < r_idx)
lowerCAmelCase_ : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
lowerCAmelCase_ : List[str] = labels.index_select(0 , UpperCAmelCase ) - l_idx
lowerCAmelCase_ : Optional[Any] = head_logprob.index_select(0 , UpperCAmelCase )
lowerCAmelCase_ : Dict = hidden.index_select(0 , UpperCAmelCase )
else:
lowerCAmelCase_ : Optional[Any] = hidden
if i == 0:
if labels is not None:
lowerCAmelCase_ : List[str] = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase_ : str = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase_ : int = self._compute_logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = nn.functional.log_softmax(UpperCAmelCase , dim=1 )
lowerCAmelCase_ : Union[str, Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
lowerCAmelCase_ : List[str] = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1 , target_i[:, None] ).squeeze(1 )
else:
lowerCAmelCase_ : Optional[Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
lowerCAmelCase_ : Optional[Any] = logprob_i
if labels is not None:
if (hasattr(self , """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0 , UpperCAmelCase , -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
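    # Inference-only variant of the forward pass: returns the full log-probabilities
    # over the vocabulary without computing any loss.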
def A ( self : Optional[int] , UpperCAmelCase : List[Any] ):
if self.n_clusters == 0:
lowerCAmelCase_ : Optional[int] = self._compute_logit(UpperCAmelCase , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] )
return nn.functional.log_softmax(UpperCAmelCase , dim=-1 )
else:
# construct weights and biases
lowerCAmelCase_ , lowerCAmelCase_ : int = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
lowerCAmelCase_ : List[Any] = self.out_layers[0].weight[l_idx:r_idx]
lowerCAmelCase_ : Optional[int] = self.out_layers[0].bias[l_idx:r_idx]
else:
lowerCAmelCase_ : Union[str, Any] = self.out_layers[i].weight
lowerCAmelCase_ : Dict = self.out_layers[i].bias
if i == 0:
lowerCAmelCase_ : Tuple = torch.cat([weight_i, self.cluster_weight] , dim=0 )
lowerCAmelCase_ : Dict = torch.cat([bias_i, self.cluster_bias] , dim=0 )
weights.append(UpperCAmelCase )
biases.append(UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = weights[0], biases[0], self.out_projs[0]
lowerCAmelCase_ : Optional[Any] = self._compute_logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Any = hidden.new_empty((head_logit.size(0 ), self.n_token) )
lowerCAmelCase_ : Dict = nn.functional.log_softmax(UpperCAmelCase , dim=1 )
lowerCAmelCase_ : Dict = [0] + self.cutoffs
for i in range(len(UpperCAmelCase ) - 1 ):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
lowerCAmelCase_ : str = head_logprob[:, : self.cutoffs[0]]
else:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = weights[i], biases[i], self.out_projs[i]
lowerCAmelCase_ : Optional[int] = self._compute_logit(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = nn.functional.log_softmax(UpperCAmelCase , dim=1 )
lowerCAmelCase_ : Optional[int] = head_logprob[:, -i] + tail_logprob_i
lowerCAmelCase_ : List[str] = logprob_i
return out
| 28
|
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
def __init__( self : Union[str, Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Dict ):
warnings.warn(
"""The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use GLPNImageProcessor instead.""" , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase )
| 28
| 1
|
__UpperCAmelCase = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__UpperCAmelCase = [{'type': 'code', 'content': INSTALL_CONTENT}]
__UpperCAmelCase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 28
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __a ( __UpperCamelCase ):
__snake_case : Any = ["""image_processor""", """tokenizer"""]
__snake_case : Tuple = """BlipImageProcessor"""
__snake_case : int = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ):
lowerCAmelCase_ : str = False
super().__init__(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.image_processor
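    # The processor bundles a BLIP image processor with a BERT tokenizer: __call__ accepts
    # images, text, or both, and merges the two encodings into a single set of model inputs.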
def __call__( self : Optional[int] , UpperCAmelCase : ImageInput = None , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Tuple , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
lowerCAmelCase_ : str = self.tokenizer
lowerCAmelCase_ : List[Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
return text_encoding
# add pixel_values
lowerCAmelCase_ : Union[str, Any] = self.image_processor(UpperCAmelCase , return_tensors=UpperCAmelCase )
if text is not None:
lowerCAmelCase_ : Optional[Any] = self.tokenizer(
text=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
else:
lowerCAmelCase_ : int = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase )
return encoding_image_processor
def A ( self : Optional[Any] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : int ):
return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
def A ( self : List[Any] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[Any] ):
return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
@property
def A ( self : int ):
lowerCAmelCase_ : int = self.tokenizer.model_input_names
lowerCAmelCase_ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 28
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __UpperCamelCase ( lowercase__ : int ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : int = int(number**0.5 )
return number == sq * sq
def __UpperCamelCase ( lowercase__ : int , lowercase__ : int , lowercase__ : int , lowercase__ : int , lowercase__ : int , lowercase__ : int ) -> tuple[int, int]:
'''simple docstring'''
lowerCAmelCase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowerCAmelCase_ : int = x_den * y_den * z_den
lowerCAmelCase_ : int = gcd(lowercase__ , lowercase__ )
top //= hcf
bottom //= hcf
return top, bottom
def __UpperCamelCase ( lowercase__ : int = 35 ) -> int:
'''simple docstring'''
lowerCAmelCase_ : set = set()
lowerCAmelCase_ : int
lowerCAmelCase_ : Fraction = Fraction(0 )
lowerCAmelCase_ : tuple[int, int]
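    # Enumerate all pairs of proper fractions x = x_num/x_den and y = y_num/y_den with
    # denominators up to `order`, solve x**n + y**n = z**n for n in {1, 2, -1, -2}, and
    # record s = x + y + z whenever z is also a proper fraction in range (the
    # golden-triple search of Project Euler problem 180).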
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowerCAmelCase_ : Union[str, Any] = x_num * y_den + x_den * y_num
lowerCAmelCase_ : Union[str, Any] = x_den * y_den
lowerCAmelCase_ : Optional[int] = gcd(lowercase__ , lowercase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : str = add_three(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
unique_s.add(lowercase__ )
# n=2
lowerCAmelCase_ : List[Any] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowerCAmelCase_ : Tuple = x_den * x_den * y_den * y_den
if is_sq(lowercase__ ) and is_sq(lowercase__ ):
lowerCAmelCase_ : Any = int(sqrt(lowercase__ ) )
lowerCAmelCase_ : List[str] = int(sqrt(lowercase__ ) )
lowerCAmelCase_ : List[str] = gcd(lowercase__ , lowercase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : Dict = add_three(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
unique_s.add(lowercase__ )
# n=-1
lowerCAmelCase_ : Union[str, Any] = x_num * y_num
lowerCAmelCase_ : Any = x_den * y_num + x_num * y_den
lowerCAmelCase_ : Tuple = gcd(lowercase__ , lowercase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : Optional[Any] = add_three(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
unique_s.add(lowercase__ )
                    # n=-2
lowerCAmelCase_ : Tuple = x_num * x_num * y_num * y_num
lowerCAmelCase_ : List[str] = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(lowercase__ ) and is_sq(lowercase__ ):
lowerCAmelCase_ : Union[str, Any] = int(sqrt(lowercase__ ) )
lowerCAmelCase_ : Any = int(sqrt(lowercase__ ) )
lowerCAmelCase_ : Tuple = gcd(lowercase__ , lowercase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase_ : int = add_three(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )
unique_s.add(lowercase__ )
for num, den in unique_s:
total += Fraction(lowercase__ , lowercase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 28
|
from math import ceil
def __UpperCamelCase ( lowercase__ : int = 1001 ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[str] = 1
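    # Ring i of the number spiral has odd side length 2*i + 1; its four corners sum to
    # 4*(2*i + 1)**2 - 6*(2*i), accumulated on top of the centre value 1.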
for i in range(1 , int(ceil(n / 2.0 ) ) ):
lowerCAmelCase_ : Optional[Any] = 2 * i + 1
lowerCAmelCase_ : Union[str, Any] = 2 * i
lowerCAmelCase_ : Optional[Any] = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
| 28
| 1
|
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
__UpperCAmelCase = logging.getLogger(__name__)
torch.set_grad_enabled(False)
__UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
def __UpperCamelCase ( lowercase__ : str , lowercase__ : Optional[int]=100 , lowercase__ : int=" " ) -> List[str]:
'''simple docstring'''
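    # Split the text on `character` and regroup the pieces into chunks of `n` words,
    # the passage size fed to the DPR context encoder below.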
lowerCAmelCase_ : Union[str, Any] = text.split(lowercase__ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(lowercase__ ) , lowercase__ )]
def __UpperCamelCase ( lowercase__ : dict ) -> dict:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : Union[str, Any] = [], []
for title, text in zip(documents["""title"""] , documents["""text"""] ):
if text is not None:
for passage in split_text(lowercase__ ):
titles.append(title if title is not None else """""" )
texts.append(lowercase__ )
return {"title": titles, "text": texts}
def __UpperCamelCase ( lowercase__ : dict , lowercase__ : DPRContextEncoder , lowercase__ : DPRContextEncoderTokenizerFast ) -> dict:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ctx_tokenizer(
documents["""title"""] , documents["""text"""] , truncation=lowercase__ , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""]
lowerCAmelCase_ : Any = ctx_encoder(input_ids.to(device=lowercase__ ) , return_dict=lowercase__ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def __UpperCamelCase ( lowercase__ : "RagExampleArguments" , lowercase__ : "ProcessingArguments" , lowercase__ : "IndexHnswArguments" , ) -> Dict:
'''simple docstring'''
logger.info("""Step 1 - Create the dataset""" )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase_ : Any = load_dataset(
"""csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase_ : int = dataset.map(lowercase__ , batched=lowercase__ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase_ : Tuple = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=lowercase__ )
lowerCAmelCase_ : Optional[Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase_ : List[Any] = Features(
{"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase_ : List[str] = dataset.map(
partial(lowercase__ , ctx_encoder=lowercase__ , ctx_tokenizer=lowercase__ ) , batched=lowercase__ , batch_size=processing_args.batch_size , features=lowercase__ , )
# And finally save your dataset
lowerCAmelCase_ : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" )
dataset.save_to_disk(lowercase__ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info("""Step 2 - Index the dataset""" )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase_ : List[str] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index("""embeddings""" , custom_index=lowercase__ )
# And save the index
lowerCAmelCase_ : List[Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" )
dataset.get_index("""embeddings""" ).save(lowercase__ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class __a :
__snake_case : str = field(
default=str(Path(__UpperCamelCase ).parent / """test_run""" / """dummy-kb""" / """my_knowledge_dataset.csv""" ) ,metadata={"""help""": """Path to a tab-separated csv file with columns 'title' and 'text'"""} ,)
__snake_case : Optional[str] = field(
default=__UpperCamelCase ,metadata={"""help""": """Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."""} ,)
__snake_case : str = field(
default="""facebook/rag-sequence-nq""" ,metadata={"""help""": """The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"""} ,)
__snake_case : str = field(
default="""facebook/dpr-ctx_encoder-multiset-base""" ,metadata={
"""help""": (
"""The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"""
""" 'facebook/dpr-ctx_encoder-multiset-base'"""
)
} ,)
__snake_case : Optional[str] = field(
default=str(Path(__UpperCamelCase ).parent / """test_run""" / """dummy-kb""" ) ,metadata={"""help""": """Path to a directory where the dataset passages and the index will be saved"""} ,)
@dataclass
class __a :
__snake_case : Optional[int] = field(
default=__UpperCamelCase ,metadata={
"""help""": """The number of processes to use to split the documents into passages. Default is single process."""
} ,)
__snake_case : int = field(
default=16 ,metadata={
"""help""": """The batch size to use when computing the passages embeddings using the DPR context encoder."""
} ,)
@dataclass
class __a :
__snake_case : int = field(
default=768 ,metadata={"""help""": """The dimension of the embeddings to pass to the HNSW Faiss index."""} ,)
__snake_case : int = field(
default=128 ,metadata={
"""help""": (
"""The number of bi-directional links created for every new element during the HNSW index construction."""
)
} ,)
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__UpperCAmelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 28
|
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger('transformers.models.speecht5')
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> List[str]:
'''simple docstring'''
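    # Re-apply weight normalization so the checkpoint's weight_g / weight_v tensors can
    # be copied in one-to-one; the parametrization is removed again once loading is done.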
hf_model.apply_weight_norm()
lowerCAmelCase_ : Dict = checkpoint["""input_conv.weight_g"""]
lowerCAmelCase_ : Any = checkpoint["""input_conv.weight_v"""]
lowerCAmelCase_ : Any = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
lowerCAmelCase_ : Tuple = checkpoint[f'upsamples.{i}.1.weight_g']
lowerCAmelCase_ : Any = checkpoint[f'upsamples.{i}.1.weight_v']
lowerCAmelCase_ : int = checkpoint[f'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
lowerCAmelCase_ : Dict = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
lowerCAmelCase_ : Dict = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
lowerCAmelCase_ : Tuple = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
lowerCAmelCase_ : str = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
lowerCAmelCase_ : Optional[Any] = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
lowerCAmelCase_ : str = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']
lowerCAmelCase_ : str = checkpoint["""output_conv.1.weight_g"""]
lowerCAmelCase_ : Dict = checkpoint["""output_conv.1.weight_v"""]
lowerCAmelCase_ : Optional[int] = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : str , lowercase__ : Tuple , lowercase__ : Dict , lowercase__ : List[Any]=None , lowercase__ : Union[str, Any]=None , ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
lowerCAmelCase_ : Optional[Any] = SpeechTaHifiGanConfig.from_pretrained(lowercase__ )
else:
lowerCAmelCase_ : Any = SpeechTaHifiGanConfig()
lowerCAmelCase_ : str = SpeechTaHifiGan(lowercase__ )
lowerCAmelCase_ : Tuple = torch.load(lowercase__ )
load_weights(orig_checkpoint["""model"""]["""generator"""] , lowercase__ , lowercase__ )
lowerCAmelCase_ : Optional[int] = np.load(lowercase__ )
lowerCAmelCase_ : Any = stats[0].reshape(-1 )
lowerCAmelCase_ : List[str] = stats[1].reshape(-1 )
lowerCAmelCase_ : Optional[int] = torch.from_numpy(lowercase__ ).float()
lowerCAmelCase_ : Any = torch.from_numpy(lowercase__ ).float()
model.save_pretrained(lowercase__ )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__UpperCAmelCase = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 28
|
def __UpperCamelCase ( lowercase__ : str ) -> bool:
'''simple docstring'''
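    # Check that all characters in the string are unique using an integer bitmask:
    # bit ord(ch) is set on first sight, so any repeated character is caught in O(1).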
lowerCAmelCase_ : Any = 0
for ch in input_str:
lowerCAmelCase_ : Any = ord(lowercase__ )
lowerCAmelCase_ : Dict = pow(2 , lowercase__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
| 1
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __a :
def __init__( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : int=13 , UpperCAmelCase : str=7 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Any=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Any=99 , UpperCAmelCase : Any=32 , UpperCAmelCase : List[Any]=5 , UpperCAmelCase : Dict=4 , UpperCAmelCase : List[str]=37 , UpperCAmelCase : Tuple="gelu" , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : Tuple=5_12 , UpperCAmelCase : Dict=16 , UpperCAmelCase : Optional[Any]=2 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : Any=3 , UpperCAmelCase : str=4 , UpperCAmelCase : Optional[Any]=None , ):
lowerCAmelCase_ : Union[str, Any] = parent
lowerCAmelCase_ : List[Any] = batch_size
lowerCAmelCase_ : List[Any] = seq_length
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : List[Any] = use_input_mask
lowerCAmelCase_ : Any = use_token_type_ids
lowerCAmelCase_ : Dict = use_labels
lowerCAmelCase_ : str = vocab_size
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : str = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Any = max_position_embeddings
lowerCAmelCase_ : Any = type_vocab_size
lowerCAmelCase_ : Union[str, Any] = type_sequence_label_size
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : List[str] = num_labels
lowerCAmelCase_ : List[Any] = num_choices
lowerCAmelCase_ : int = scope
def A ( self : Tuple ):
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : List[Any] = None
if self.use_input_mask:
lowerCAmelCase_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Dict ):
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , )
def A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any ):
lowerCAmelCase_ : Optional[Any] = NystromformerModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Dict = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
lowerCAmelCase_ : Tuple = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Dict ):
lowerCAmelCase_ : Optional[int] = NystromformerForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str ):
lowerCAmelCase_ : Tuple = NystromformerForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Tuple , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : Optional[int] = NystromformerForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : int , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] ):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : Union[str, Any] = NystromformerForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : List[str] ):
lowerCAmelCase_ : Optional[int] = self.num_choices
lowerCAmelCase_ : Tuple = NystromformerForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : int = model(
UpperCAmelCase , attention_mask=UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : int ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = config_and_inputs
lowerCAmelCase_ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Tuple = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
__snake_case : Dict = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : int = False
__snake_case : Optional[int] = False
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = NystromformerModelTester(self )
lowerCAmelCase_ : Any = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A ( self : Optional[int] ):
self.config_tester.run_common_tests()
def A ( self : Tuple ):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ : str = type
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A ( self : Tuple ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A ( self : List[str] ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A ( self : Optional[int] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A ( self : List[str] ):
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : List[str] = NystromformerModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_torch
class __a ( unittest.TestCase ):
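    # Integration tests against the released uw-madison/nystromformer-512 checkpoint:
    # verify output shapes and small slices of the expected logits, plus a masked-LM prediction.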
@slow
def A ( self : Tuple ):
lowerCAmelCase_ : Optional[int] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
lowerCAmelCase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase )[0]
lowerCAmelCase_ : List[str] = torch.Size((1, 6, 7_68) )
self.assertEqual(output.shape , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
@slow
def A ( self : Dict ):
lowerCAmelCase_ : int = """the [MASK] of Belgium is Brussels"""
lowerCAmelCase_ : List[str] = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
lowerCAmelCase_ : Union[str, Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
lowerCAmelCase_ : Optional[int] = tokenizer(UpperCAmelCase , return_tensors="""pt""" )
with torch.no_grad():
lowerCAmelCase_ : Union[str, Any] = model(encoding.input_ids ).logits
lowerCAmelCase_ : Union[str, Any] = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(UpperCAmelCase ) , """capital""" )
| 28
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
__UpperCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
__UpperCAmelCase = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
__UpperCAmelCase = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __a ( __UpperCamelCase ):
__snake_case : List[Any] = VOCAB_FILES_NAMES
__snake_case : List[str] = PRETRAINED_VOCAB_FILES_MAP
__snake_case : Dict = PRETRAINED_INIT_CONFIGURATION
__snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : str = ElectraTokenizer
def __init__( self : List[Any] , UpperCAmelCase : Any=None , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Dict="[UNK]" , UpperCAmelCase : Any="[SEP]" , UpperCAmelCase : Any="[PAD]" , UpperCAmelCase : Union[str, Any]="[CLS]" , UpperCAmelCase : Optional[Any]="[MASK]" , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Union[str, Any]=None , **UpperCAmelCase : Optional[Any] , ):
super().__init__(
UpperCAmelCase , tokenizer_file=UpperCAmelCase , do_lower_case=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , pad_token=UpperCAmelCase , cls_token=UpperCAmelCase , mask_token=UpperCAmelCase , tokenize_chinese_chars=UpperCAmelCase , strip_accents=UpperCAmelCase , **UpperCAmelCase , )
lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
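        # If the serialized normalizer state disagrees with the requested options, rebuild
        # the normalizer with the new lowercase / strip_accents / handle_chinese_chars settings.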
if (
normalizer_state.get("""lowercase""" , UpperCAmelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , UpperCAmelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , UpperCAmelCase ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Optional[Any] = getattr(UpperCAmelCase , normalizer_state.pop("""type""" ) )
lowerCAmelCase_ : List[Any] = do_lower_case
lowerCAmelCase_ : Tuple = strip_accents
lowerCAmelCase_ : Union[str, Any] = tokenize_chinese_chars
lowerCAmelCase_ : int = normalizer_class(**UpperCAmelCase )
lowerCAmelCase_ : str = do_lower_case
def A ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any]=None ):
lowerCAmelCase_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
lowerCAmelCase_ : Union[str, Any] = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 28
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : str=False ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowerCAmelCase_ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def __UpperCamelCase ( lowercase__ : int , lowercase__ : Dict , lowercase__ : Optional[Any]=False ) -> Optional[Any]:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
lowerCAmelCase_ : int = """"""
else:
lowerCAmelCase_ : Union[str, Any] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCAmelCase_ : str = state_dict.pop(f'blocks.{i}.attn.qkv.weight' )
lowerCAmelCase_ : Any = state_dict.pop(f'blocks.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : Dict = in_proj_weight[
: config.hidden_size, :
]
lowerCAmelCase_ : int = in_proj_bias[: config.hidden_size]
lowerCAmelCase_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCAmelCase_ : int = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCAmelCase_ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowerCAmelCase_ : Dict = in_proj_bias[-config.hidden_size :]
def __UpperCamelCase ( lowercase__ : Any ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase__ , lowercase__ )
def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ : Dict = dct.pop(lowercase__ )
lowerCAmelCase_ : List[Any] = val
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : List[str] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Any=True ) -> Optional[int]:
'''simple docstring'''
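    # Convert a DINO checkpoint from torch.hub to the HF ViT layout: rename the keys,
    # split the fused qkv projection into separate q/k/v weights, and verify the HF
    # model's outputs against the original on a sample image before saving.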
lowerCAmelCase_ : List[Any] = ViTConfig()
# patch_size
if model_name[-1] == "8":
lowerCAmelCase_ : Dict = 8
# set labels if required
if not base_model:
lowerCAmelCase_ : str = 1000
lowerCAmelCase_ : List[Any] = """huggingface/label-files"""
lowerCAmelCase_ : Optional[int] = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : str = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : List[str] = {int(lowercase__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : Any = idalabel
lowerCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
lowerCAmelCase_ : Union[str, Any] = 384
lowerCAmelCase_ : Any = 1536
lowerCAmelCase_ : Union[str, Any] = 12
lowerCAmelCase_ : str = 6
# load original model from torch hub
lowerCAmelCase_ : Any = torch.hub.load("""facebookresearch/dino:main""" , lowercase__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
lowerCAmelCase_ : Any = original_model.state_dict()
if base_model:
remove_classification_head_(lowercase__ )
lowerCAmelCase_ : Dict = create_rename_keys(lowercase__ , base_model=lowercase__ )
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ )
# load HuggingFace model
if base_model:
lowerCAmelCase_ : int = ViTModel(lowercase__ , add_pooling_layer=lowercase__ ).eval()
else:
lowerCAmelCase_ : Union[str, Any] = ViTForImageClassification(lowercase__ ).eval()
model.load_state_dict(lowercase__ )
# Check outputs on an image, prepared by ViTImageProcessor
lowerCAmelCase_ : List[str] = ViTImageProcessor()
lowerCAmelCase_ : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
lowerCAmelCase_ : List[str] = encoding["""pixel_values"""]
lowerCAmelCase_ : Optional[int] = model(lowercase__ )
if base_model:
lowerCAmelCase_ : Union[str, Any] = original_model(lowercase__ )
assert torch.allclose(lowercase__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
lowerCAmelCase_ : int = original_model(lowercase__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowercase__ , outputs.logits , atol=1E-3 )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase__ )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__UpperCAmelCase = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 28
|
from datetime import datetime as dt
import os
from github import Github
__UpperCAmelCase = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def __UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = Github(os.environ["""GITHUB_TOKEN"""] )
lowerCAmelCase_ : Tuple = g.get_repo("""huggingface/transformers""" )
lowerCAmelCase_ : Any = repo.get_issues(state="""open""" )
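    # Policy: for open issues at least 30 days old that carry no exempt label, close those
    # whose latest comment is the bot's and that have been inactive for over 7 days;
    # otherwise post a stale warning after more than 23 days of inactivity.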
for issue in open_issues:
        lowerCAmelCase_ : Union[str, Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
lowerCAmelCase_ : str = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
| 28
| 1
|
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
__UpperCAmelCase = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
| 28
|
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __a ( unittest.TestCase ):
def A ( self : List[Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCAmelCase_ : Optional[Any] = Vector()
def A ( self : List[str] ):
lowerCAmelCase_ : Tuple = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCAmelCase ) , """(0,0,0,0,0,1)""" )
def A ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCAmelCase ) , 4 )
def A ( self : Dict ):
lowerCAmelCase_ : Dict = Vector([1, 2] )
lowerCAmelCase_ : str = Vector([1, 2, 3, 4, 5] )
lowerCAmelCase_ : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Dict = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
lowerCAmelCase_ : Optional[int] = Vector([2, -1, 4] ) # for test of dot product
lowerCAmelCase_ : str = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def A ( self : List[str] ):
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def A ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCAmelCase , UpperCAmelCase ) ) , """(3,4,7)""" )
def A ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = Vector([1, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : int = x.copy()
self.assertEqual(str(UpperCAmelCase ) , str(UpperCAmelCase ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCAmelCase ) , """(0,1,0)""" )
def A ( self : Any ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : List[str] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Tuple ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Union[str, Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCAmelCase_ : Any = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def A ( self : Tuple ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def A ( self : Dict ):
lowerCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def A ( self : Optional[int] ):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
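Outside the test harness, the same lib classes can be used directly. A minimal sketch mirroring the assertions above (run from inside the package so the relative import resolves):

from .lib import Matrix, Vector

a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
print(a.determinant())  # -5, as asserted above
b = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
print(b * Vector([1, 2, 3]))  # (14,32,50)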
| 28
| 1
|
import argparse
import datetime
def __UpperCamelCase ( lowercase__ : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
lowerCAmelCase_ : Any = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
    if len(lowercase__ ) != 10:
        raise ValueError("""Must be 10 characters long""" )
# Get month
lowerCAmelCase_ : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
lowerCAmelCase_ : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
lowerCAmelCase_ : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
lowerCAmelCase_ : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
lowerCAmelCase_ : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
lowerCAmelCase_ : Dict = datetime.date(int(lowercase__ ) , int(lowercase__ ) , int(lowercase__ ) )
# Start math
if m <= 2:
lowerCAmelCase_ : Union[str, Any] = y - 1
lowerCAmelCase_ : List[str] = m + 12
# maths var
lowerCAmelCase_ : int = int(str(lowercase__ )[:2] )
lowerCAmelCase_ : int = int(str(lowercase__ )[2:] )
lowerCAmelCase_ : int = int(2.6 * m - 5.39 )
lowerCAmelCase_ : int = int(c / 4 )
lowerCAmelCase_ : int = int(k / 4 )
lowerCAmelCase_ : int = int(d + k )
lowerCAmelCase_ : int = int(t + u + v + x )
lowerCAmelCase_ : int = int(z - (2 * c) )
lowerCAmelCase_ : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
lowerCAmelCase_ : str = f'Your date {date_input}, is a {days[str(lowercase__ )]}!'
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
__UpperCAmelCase = parser.parse_args()
zeller(args.date_input)
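As a worked example of the arithmetic above: for 01-01-2000 the month adjustment gives m = 13 and y = 1999, so c = 19, k = 99, t = 28, u = 4, v = 24, x = 100, z = 156, w = 118, and w % 7 = 6, i.e. Saturday. A one-line sketch using the zeller name that the entry point above already assumes:

print(zeller("01-01-2000"))  # Your date 01-01-2000, is a Saturday!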
| 28
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __a ( __UpperCamelCase ,__UpperCamelCase ):
__snake_case : Union[str, Any] = """pixel_values"""
__snake_case : Optional[Any] = False
__snake_case : Dict = TimmBackboneConfig
def __init__( self : List[str] , UpperCAmelCase : int , **UpperCAmelCase : List[str] ):
requires_backends(self , """timm""" )
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(UpperCAmelCase , """out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
lowerCAmelCase_ : List[str] = getattr(UpperCAmelCase , """use_pretrained_backbone""" , UpperCAmelCase )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCAmelCase_ : str = config.out_indices if getattr(UpperCAmelCase , """out_indices""" , UpperCAmelCase ) is not None else (-1,)
lowerCAmelCase_ : Optional[int] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCAmelCase_ : Union[str, Any] = self._backbone.return_layers
        lowerCAmelCase_ : Dict = {layer["""module"""]: str(i ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def A ( cls : Dict , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""config""" , TimmBackboneConfig() )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""use_timm_backbone""" , UpperCAmelCase )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels )
lowerCAmelCase_ : Tuple = kwargs.pop("""features_only""" , config.features_only )
lowerCAmelCase_ : List[str] = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""out_indices""" , config.out_indices )
lowerCAmelCase_ : Optional[Any] = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
pass
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : Any ):
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase_ : Optional[Any] = self._all_layers
lowerCAmelCase_ : List[Any] = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = self._return_layers
lowerCAmelCase_ : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase_ : Tuple = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[str] = tuple(UpperCAmelCase )
lowerCAmelCase_ : int = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase_ : Optional[Any] = (feature_maps,)
if output_hidden_states:
lowerCAmelCase_ : Tuple = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
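A minimal usage sketch for the backbone wrapper above, using the upstream names TimmBackbone and TimmBackboneConfig (the class names in this file are obfuscated) and assuming timm and torch are installed:

import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
backbone = TimmBackbone(config)
outputs = backbone(torch.rand(1, 3, 224, 224))
print([fm.shape for fm in outputs.feature_maps])  # four maps at strides 4/8/16/32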
| 28
| 1
|
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class __a ( unittest.TestCase ):
@property
def A ( self : List[str] ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def A ( self : str ):
lowerCAmelCase_ : Dict = ort.SessionOptions()
lowerCAmelCase_ : Any = False
return options
def A ( self : Tuple ):
lowerCAmelCase_ : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
lowerCAmelCase_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
lowerCAmelCase_ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
# using the PNDM scheduler by default
lowerCAmelCase_ : Optional[int] = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=UpperCAmelCase , feature_extractor=UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowerCAmelCase_ : Tuple = """A red cat sitting on a park bench"""
lowerCAmelCase_ : Any = np.random.RandomState(0 )
lowerCAmelCase_ : str = pipe(
prompt=UpperCAmelCase , image=UpperCAmelCase , mask_image=UpperCAmelCase , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=UpperCAmelCase , output_type="""np""" , )
lowerCAmelCase_ : Optional[Any] = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1e-2
| 28
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class __a ( __UpperCamelCase ):
__snake_case : Optional[Any] = """mra"""
def __init__( self : List[str] , UpperCAmelCase : Tuple=5_02_65 , UpperCAmelCase : str=7_68 , UpperCAmelCase : int=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Tuple=30_72 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[str]=5_12 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : int=1e-5 , UpperCAmelCase : Optional[int]="absolute" , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Any="full" , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Any=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , **UpperCAmelCase : Tuple , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[int] = position_embedding_type
lowerCAmelCase_ : Any = block_per_row
lowerCAmelCase_ : int = approx_mode
lowerCAmelCase_ : Union[str, Any] = initial_prior_first_n_blocks
lowerCAmelCase_ : Dict = initial_prior_diagonal_n_blocks
| 28
| 1
|
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
__UpperCAmelCase = logging.get_logger(__name__)
class __a ( __UpperCamelCase ):
__snake_case : Optional[Any] = ["""pixel_values"""]
def __init__( self : Any , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 2_55 , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = True , **UpperCAmelCase : List[Any] , ):
super().__init__(**UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = size if size is not None else {"""shortest_edge""": 2_24}
lowerCAmelCase_ : Tuple = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCAmelCase_ : List[str] = crop_size if crop_size is not None else {"""height""": 2_56, """width""": 2_56}
lowerCAmelCase_ : List[str] = get_size_dict(UpperCAmelCase , param_name="""crop_size""" )
lowerCAmelCase_ : Tuple = do_resize
lowerCAmelCase_ : List[str] = size
lowerCAmelCase_ : List[Any] = resample
lowerCAmelCase_ : int = do_rescale
lowerCAmelCase_ : Optional[Any] = rescale_factor
lowerCAmelCase_ : List[str] = do_center_crop
lowerCAmelCase_ : int = crop_size
lowerCAmelCase_ : List[Any] = do_flip_channel_order
def A ( self : Any , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PIL.Image.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ):
lowerCAmelCase_ : int = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCAmelCase_ : Any = get_resize_output_image_size(UpperCAmelCase , size=size["""shortest_edge"""] , default_to_square=UpperCAmelCase )
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A ( self : Optional[int] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : List[str] , ):
lowerCAmelCase_ : str = get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCAmelCase , **UpperCAmelCase )
def A ( self : Tuple , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Any , ):
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None ):
return flip_channel_order(UpperCAmelCase , data_format=UpperCAmelCase )
def A ( self : Optional[int] , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : Optional[Any] , ):
lowerCAmelCase_ : Any = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ : List[str] = resample if resample is not None else self.resample
lowerCAmelCase_ : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase_ : Dict = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowerCAmelCase_ : Dict = size if size is not None else self.size
lowerCAmelCase_ : int = get_size_dict(UpperCAmelCase , default_to_square=UpperCAmelCase )
lowerCAmelCase_ : str = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase_ : str = get_size_dict(UpperCAmelCase , param_name="""crop_size""" )
lowerCAmelCase_ : Any = make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase_ : Optional[int] = [to_numpy_array(UpperCAmelCase ) for image in images]
if do_resize:
lowerCAmelCase_ : List[str] = [self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_center_crop:
lowerCAmelCase_ : List[str] = [self.center_crop(image=UpperCAmelCase , size=UpperCAmelCase ) for image in images]
if do_rescale:
lowerCAmelCase_ : Optional[int] = [self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowerCAmelCase_ : Any = [self.flip_channel_order(image=UpperCAmelCase ) for image in images]
lowerCAmelCase_ : Optional[int] = [to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowerCAmelCase_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=UpperCAmelCase , tensor_type=UpperCAmelCase )
def A ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Tuple] = None ):
lowerCAmelCase_ : Union[str, Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase ) != len(UpperCAmelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(UpperCAmelCase ):
lowerCAmelCase_ : List[str] = target_sizes.numpy()
lowerCAmelCase_ : List[str] = []
for idx in range(len(UpperCAmelCase ) ):
lowerCAmelCase_ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=UpperCAmelCase )
lowerCAmelCase_ : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCAmelCase )
else:
lowerCAmelCase_ : str = logits.argmax(dim=1 )
lowerCAmelCase_ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
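A minimal preprocessing sketch for the image processor above (upstream this is MobileViT's processor, hence the RGB-to-BGR flip); the random array stands in for a real image and the expected shape assumes the default 256x256 center crop:

import numpy as np
from transformers import MobileViTImageProcessor

image_processor = MobileViTImageProcessor()
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)
batch = image_processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 256, 256)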
| 28
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def __UpperCamelCase ( lowercase__ : int ) -> str:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
lowerCAmelCase_ : Any = precision
lowerCAmelCase_ : Any = ceil(precision / 14 )
lowerCAmelCase_ : Optional[Any] = 426880 * Decimal(10005 ).sqrt()
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : Optional[int] = 13591409
lowerCAmelCase_ : Union[str, Any] = Decimal(lowercase__ )
for k in range(1 , lowercase__ ):
lowerCAmelCase_ : Optional[Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase__ ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__UpperCAmelCase = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 28
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__UpperCAmelCase = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", f"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias"""))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def __UpperCamelCase ( lowercase__ : Optional[int] , lowercase__ : Optional[int] , lowercase__ : Any ) -> str:
'''simple docstring'''
lowerCAmelCase_ : List[Any] = state_dict.pop(lowercase__ )
lowerCAmelCase_ : List[Any] = val
def __UpperCamelCase ( lowercase__ : Dict ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Any = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
lowerCAmelCase_ : Optional[Any] = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
lowerCAmelCase_ : int = value
else:
lowerCAmelCase_ : Tuple = value
return new_state_dict
def __UpperCamelCase ( lowercase__ : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase_ : List[str] = """"""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
lowerCAmelCase_ : Dict = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
lowerCAmelCase_ : List[str] = state_dict.pop(f'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : str = in_proj_weight[:256, :]
lowerCAmelCase_ : Optional[int] = in_proj_bias[:256]
lowerCAmelCase_ : Dict = in_proj_weight[256:512, :]
lowerCAmelCase_ : int = in_proj_bias[256:512]
lowerCAmelCase_ : Optional[Any] = in_proj_weight[-256:, :]
lowerCAmelCase_ : Any = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
lowerCAmelCase_ : Union[str, Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
lowerCAmelCase_ : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
lowerCAmelCase_ : int = in_proj_weight[:256, :]
lowerCAmelCase_ : Dict = in_proj_bias[:256]
lowerCAmelCase_ : int = in_proj_weight[256:512, :]
lowerCAmelCase_ : Optional[Any] = in_proj_bias[256:512]
lowerCAmelCase_ : Dict = in_proj_weight[-256:, :]
lowerCAmelCase_ : Any = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
lowerCAmelCase_ : List[Any] = state_dict.pop(
f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight' )
lowerCAmelCase_ : List[Any] = state_dict.pop(f'{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
lowerCAmelCase_ : Dict = in_proj_weight_cross_attn[:256, :]
lowerCAmelCase_ : List[str] = in_proj_bias_cross_attn[:256]
lowerCAmelCase_ : Tuple = in_proj_weight_cross_attn[256:512, :]
lowerCAmelCase_ : Tuple = in_proj_bias_cross_attn[256:512]
lowerCAmelCase_ : Optional[int] = in_proj_weight_cross_attn[-256:, :]
lowerCAmelCase_ : List[str] = in_proj_bias_cross_attn[-256:]
def __UpperCamelCase ( lowercase__ : Tuple , lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = image.size
lowerCAmelCase_ : List[Any] = max(lowercase__ , lowercase__ )
lowerCAmelCase_ : Optional[Any] = 800 if """detection""" in checkpoint_url else 1000
lowerCAmelCase_ : Dict = target_max_size / current_max_size
lowerCAmelCase_ : Tuple = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) )
return resized_image
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = F.to_tensor(lowercase__ )
lowerCAmelCase_ : Optional[int] = F.normalize(lowercase__ , mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] )
return image
@torch.no_grad()
def __UpperCamelCase ( lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Tuple:
'''simple docstring'''
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase_ : Optional[int] = torch.hub.load_state_dict_from_url(lowercase__ , map_location="""cpu""" )
# rename keys
for src, dest in rename_keys:
rename_key(lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ : List[str] = rename_backbone_keys(lowercase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCAmelCase_ : Any = """model."""
for key in state_dict.copy().keys():
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
lowerCAmelCase_ : Tuple = state_dict.pop(lowercase__ )
lowerCAmelCase_ : Tuple = val
# create HuggingFace model and load state dict
lowerCAmelCase_ : List[str] = TableTransformerConfig(
backbone="""resnet18""" , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , )
if "detection" in checkpoint_url:
lowerCAmelCase_ : List[Any] = 15
lowerCAmelCase_ : int = 2
lowerCAmelCase_ : str = {0: """table""", 1: """table rotated"""}
lowerCAmelCase_ : Tuple = idalabel
lowerCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
else:
lowerCAmelCase_ : Union[str, Any] = 125
lowerCAmelCase_ : int = 6
lowerCAmelCase_ : List[Any] = {
0: """table""",
1: """table column""",
2: """table row""",
3: """table column header""",
4: """table projected row header""",
5: """table spanning cell""",
}
lowerCAmelCase_ : List[str] = idalabel
lowerCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase_ : Tuple = DetrImageProcessor(
format="""coco_detection""" , max_size=800 if """detection""" in checkpoint_url else 1000 )
lowerCAmelCase_ : List[Any] = TableTransformerForObjectDetection(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# verify our conversion
lowerCAmelCase_ : Optional[Any] = """example_pdf.png""" if """detection""" in checkpoint_url else """example_table.png"""
lowerCAmelCase_ : int = hf_hub_download(repo_id="""nielsr/example-pdf""" , repo_type="""dataset""" , filename=lowercase__ )
lowerCAmelCase_ : Any = Image.open(lowercase__ ).convert("""RGB""" )
lowerCAmelCase_ : Optional[int] = normalize(resize(lowercase__ , lowercase__ ) ).unsqueeze(0 )
lowerCAmelCase_ : Any = model(lowercase__ )
if "detection" in checkpoint_url:
lowerCAmelCase_ : Optional[int] = (1, 15, 3)
lowerCAmelCase_ : Tuple = torch.tensor(
[[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
lowerCAmelCase_ : Any = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
else:
lowerCAmelCase_ : Any = (1, 125, 7)
lowerCAmelCase_ : Dict = torch.tensor(
[[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
lowerCAmelCase_ : str = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, :3, :3] , lowercase__ , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , lowercase__ , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
image_processor.save_pretrained(lowercase__ )
if push_to_hub:
# Push model to HF hub
logger.info("""Pushing model to the hub...""" )
lowerCAmelCase_ : int = (
"""microsoft/table-transformer-detection"""
if """detection""" in checkpoint_url
else """microsoft/table-transformer-structure-recognition"""
)
model.push_to_hub(lowercase__ )
image_processor.push_to_hub(lowercase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCAmelCase = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
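After a run with --pytorch_dump_folder_path set, the converted checkpoint reloads like any hub model. A minimal sketch with an illustrative path:

from transformers import DetrImageProcessor, TableTransformerForObjectDetection

model = TableTransformerForObjectDetection.from_pretrained("./table-transformer-detection")
image_processor = DetrImageProcessor.from_pretrained("./table-transformer-detection")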
| 28
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __a ( __UpperCamelCase ):
__snake_case : Union[str, Any] = """gptj"""
__snake_case : int = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , UpperCAmelCase : Optional[int]=5_04_00 , UpperCAmelCase : Optional[int]=20_48 , UpperCAmelCase : str=40_96 , UpperCAmelCase : Any=28 , UpperCAmelCase : Dict=16 , UpperCAmelCase : List[str]=64 , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Optional[Any]=1e-5 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Dict=5_02_56 , UpperCAmelCase : int=5_02_56 , UpperCAmelCase : Tuple=False , **UpperCAmelCase : Any , ):
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Union[str, Any] = n_positions
lowerCAmelCase_ : Union[str, Any] = n_embd
lowerCAmelCase_ : List[Any] = n_layer
lowerCAmelCase_ : List[Any] = n_head
lowerCAmelCase_ : Tuple = n_inner
lowerCAmelCase_ : Optional[Any] = rotary_dim
lowerCAmelCase_ : str = activation_function
lowerCAmelCase_ : str = resid_pdrop
lowerCAmelCase_ : List[Any] = embd_pdrop
lowerCAmelCase_ : Dict = attn_pdrop
lowerCAmelCase_ : Any = layer_norm_epsilon
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Optional[int] = use_cache
lowerCAmelCase_ : Optional[int] = bos_token_id
lowerCAmelCase_ : Any = eos_token_id
super().__init__(
bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase )
class __a ( __UpperCamelCase ):
def __init__( self : Any , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : str = "default" , UpperCAmelCase : List[PatchingSpec] = None , UpperCAmelCase : bool = False , ):
super().__init__(UpperCAmelCase , task=UpperCAmelCase , patching_specs=UpperCAmelCase , use_past=UpperCAmelCase )
if not getattr(self._config , """pad_token_id""" , UpperCAmelCase ):
# TODO: how to do that better?
lowerCAmelCase_ : List[Any] = 0
@property
def A ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction="""inputs""" )
lowerCAmelCase_ : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCAmelCase_ : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def A ( self : Union[str, Any] ):
return self._config.n_layer
@property
def A ( self : Optional[Any] ):
return self._config.n_head
def A ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase_ : Optional[Any] = super(UpperCAmelCase , self ).generate_dummy_inputs(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase_ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : Optional[Any] = seqlen + 2
lowerCAmelCase_ : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase_ : Optional[int] = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(self.num_layers )
]
lowerCAmelCase_ : Dict = common_inputs["""attention_mask"""]
if self.use_past:
lowerCAmelCase_ : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
lowerCAmelCase_ : str = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def A ( self : Optional[int] ):
return 13
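A minimal sketch exercising the ONNX config above (upstream it is named GPTJOnnxConfig); the tiny GPTJConfig overrides and the dummy shapes are illustrative:

from transformers import AutoTokenizer, GPTJConfig
from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig

config = GPTJConfig(n_layer=2, n_head=4, n_embd=64)
onnx_config = GPTJOnnxConfig(config, task="default")
tokenizer = AutoTokenizer.from_pretrained("gpt2")  # GPT-J reuses the GPT-2 BPE tokenizer
dummy = onnx_config.generate_dummy_inputs(tokenizer, batch_size=2, seq_length=8)
print(sorted(dummy.keys()))  # ['attention_mask', 'input_ids']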
| 28
| 1
|
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __UpperCamelCase ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> float:
'''simple docstring'''
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(lowercase__ , lowercase__ ) ) )
def __UpperCamelCase ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> list[list[list[float] | float]]:
'''simple docstring'''
if dataset.ndim != value_array.ndim:
lowerCAmelCase_ : Dict = (
"""Wrong input data's dimensions... """
f'dataset : {dataset.ndim}, value_array : {value_array.ndim}'
)
raise ValueError(lowercase__ )
try:
if dataset.shape[1] != value_array.shape[1]:
lowerCAmelCase_ : Union[str, Any] = (
"""Wrong input data's shape... """
f'dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'
)
raise ValueError(lowercase__ )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("""Wrong shape""" )
if dataset.dtype != value_array.dtype:
lowerCAmelCase_ : str = (
"""Input data have different datatype... """
f'dataset : {dataset.dtype}, value_array : {value_array.dtype}'
)
raise TypeError(lowercase__ )
lowerCAmelCase_ : str = []
for value in value_array:
lowerCAmelCase_ : int = euclidean(lowercase__ , dataset[0] )
lowerCAmelCase_ : Tuple = dataset[0].tolist()
for dataset_value in dataset[1:]:
lowerCAmelCase_ : Any = euclidean(lowercase__ , lowercase__ )
if dist > temp_dist:
lowerCAmelCase_ : Any = temp_dist
lowerCAmelCase_ : List[Any] = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __UpperCamelCase ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ) -> float:
'''simple docstring'''
return np.dot(lowercase__ , lowercase__ ) / (norm(lowercase__ ) * norm(lowercase__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
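A quick worked example of the nearest-neighbour search above, with the upstream name similarity_search standing in for the obfuscated definition:

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.0, 1.0]])
print(similarity_search(dataset, value_array))  # [[[0.0, 0.0], 1.0]] -- [0, 0] is nearest, at distance 1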
| 28
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 28
| 1
|
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __a ( unittest.TestCase ):
@slow
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[int] = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
lowerCAmelCase_ : int = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowerCAmelCase_ : str = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
lowerCAmelCase_ : Optional[int] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
lowerCAmelCase_ : Optional[Any] = shift_tokens_right(UpperCAmelCase , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase , decoder_input_ids=UpperCAmelCase ).logits
lowerCAmelCase_ : Dict = optax.softmax_cross_entropy(UpperCAmelCase , onehot(UpperCAmelCase , logits.shape[-1] ) ).mean()
lowerCAmelCase_ : List[str] = -(labels.shape[-1] * loss.item())
lowerCAmelCase_ : List[str] = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 28
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __a :
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[Any]=14 , UpperCAmelCase : str=7 , UpperCAmelCase : str=True , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Any=True , UpperCAmelCase : Any=99 , UpperCAmelCase : Any=32 , UpperCAmelCase : Any=4 , UpperCAmelCase : int=4 , UpperCAmelCase : str=4 , UpperCAmelCase : Tuple=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Optional[Any]=5_12 , UpperCAmelCase : List[str]=0.02 , ):
lowerCAmelCase_ : List[Any] = parent
lowerCAmelCase_ : Union[str, Any] = batch_size
lowerCAmelCase_ : Dict = seq_length
lowerCAmelCase_ : Optional[Any] = is_training
lowerCAmelCase_ : Optional[int] = use_input_mask
lowerCAmelCase_ : Optional[Any] = use_token_type_ids
lowerCAmelCase_ : Optional[Any] = use_labels
lowerCAmelCase_ : Any = vocab_size
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : Any = rotary_dim
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : int = None
lowerCAmelCase_ : Union[str, Any] = vocab_size - 1
lowerCAmelCase_ : str = vocab_size - 1
lowerCAmelCase_ : Optional[int] = vocab_size - 1
def A ( self : List[Any] ):
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : Optional[int] = None
if self.use_input_mask:
lowerCAmelCase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = config_and_inputs
lowerCAmelCase_ : int = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : str = 20
lowerCAmelCase_ : Dict = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowerCAmelCase_ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowerCAmelCase_ : Tuple = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase_ : Dict = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : List[str] = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Any = model(UpperCAmelCase )
lowerCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def A ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any ):
lowerCAmelCase_ : int = 20
lowerCAmelCase_ : List[Any] = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : Tuple = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowerCAmelCase_ : Dict = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase_ : Tuple = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowerCAmelCase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__snake_case : Any = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def A ( self : Any ):
lowerCAmelCase_ : List[str] = FlaxGPTJModelTester(self )
def A ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Tuple ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@tooslow
def A ( self : int ):
lowerCAmelCase_ : Optional[int] = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" )
lowerCAmelCase_ : Tuple = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=UpperCAmelCase , truncation=UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" )
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Optional[Any] = model.config.eos_token_id
lowerCAmelCase_ : List[Any] = jax.jit(model.generate )
lowerCAmelCase_ : Any = jit_generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCAmelCase_ : str = tokenizer.batch_decode(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = [
"""Hello this is a long string of text.\n\nI'm trying to get the text of the""",
"""Hey, I'm a little late to the party. I'm going to""",
]
self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 28
| 1
|
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod via recursive binary exponentiation, O(log n)."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
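# Why the demo below works (assuming p is prime and b is not a multiple of p):
# by Fermat's little theorem b**(p-1) % p == 1, so b**(p-2) is the modular
# inverse of b mod p, and the modular "division" (a / b) % p can be computed
# as (a * binary_exponentiation(b, p - 2, p)) % p.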
# a prime number
p = 701
a = 1_000_000_000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 28
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2
@register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat
    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
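# Minimal sampling-loop sketch for this scheduler. Only the scheduler API above
# is real; `unet`, `shape` and `generator` are hypothetical placeholders:
#
#     scheduler = KarrasVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=50)
#     sample = randn_tensor(shape, generator=generator) * scheduler.init_noise_sigma
#     for i in range(len(scheduler.schedule)):
#         sigma = scheduler.schedule[i]
#         sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0.0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#         model_output = unet(sample_hat, sigma_hat)
#         sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample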
| 28
| 1
|
from __future__ import annotations
from math import pow, sqrt
def calculate_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Given two of resistance (R), reactance (X) and impedance (Z), where
    Z**2 = R**2 + X**2, solve for the missing quantity (passed in as 0)."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
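# Example (the 3-4-5 right triangle: Z**2 = R**2 + X**2):
#     calculate_impedance(3, 4, 0)  # -> {"impedance": 5.0}
#     calculate_impedance(0, 4, 5)  # -> {"resistance": 3.0}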
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
|
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        previous_node.next = self.front
        self.front.prev = previous_node
    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data
    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data
        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data
    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
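# Usage sketch (capacity is fixed at construction; overfilling raises "Full Queue"):
#     queue = CircularQueueLinkedList(initial_capacity=2)
#     queue.enqueue("a")
#     queue.enqueue("b")
#     queue.dequeue()  # -> "a"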
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
| 1
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 28
|
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention as a tuple."""
    k = params[f"{prefix}/layers_{i}/{layer_name}/key/kernel"]
    o = params[f"{prefix}/layers_{i}/{layer_name}/out/kernel"]
    q = params[f"{prefix}/layers_{i}/{layer_name}/query/kernel"]
    v = params[f"{prefix}/layers_{i}/{layer_name}/value/kernel"]
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer; does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/layers_{i}/mlp/wi_0/kernel"]
        wi_1 = params[f"{prefix}/layers_{i}/mlp/wi_1/kernel"]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/layers_{i}/mlp/wi/kernel"]
    wo = params[f"{prefix}/layers_{i}/mlp/wo/kernel"]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm scale of a layer."""
    return params[f"{prefix}/layers_{i}/{layer_name}/scale"]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool):
    """Converts parameters from T5X-Flax naming to Transformers-PyTorch naming."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/layers_0/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T

    new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
        "encoder/relpos_bias/rel_embedding"
    ].T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = old[
            "decoder/relpos_bias/rel_embedding"
        ].T

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
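# Illustrative name mapping produced by convert_tax_to_pytorch (kernels are
# transposed on the way through):
#   "encoder/layers_3/attention/key/kernel" -> "encoder.block.3.layer.0.SelfAttention.k.weight"
#   "encoder/layers_3/mlp/wi_0/kernel"      -> "encoder.block.3.layer.1.DenseReluDense.wi_0.weight"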
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Replaces the params in the model with the converted T5X params."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = TaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
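# Example invocation (script name and all paths below are placeholders):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output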
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
    # Required parameters
    parser.add_argument(
        '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
| 28
| 1
|
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other
def __neg__( self : Any ):
return self * -1
    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )
    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
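# Usage sketch:
#     m = Matrix([[1, 2], [3, 4]])
#     m.determinant()   # -> -2
#     m * m             # matrix product via Matrix.dot_product
#     m ** -1           # inverse; note __mul__'s int() cast truncates the entries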
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 28
| 1
|
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='roberta', choices=['roberta', 'gpt2'])
parser.add_argument('--model_name', default='roberta-large', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_roberta_048131723.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"
    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[layer] = state_dict[layer]
if args.vocab_transform:
for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
    compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 28
|
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with its Maclaurin series, using `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )
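# Series evaluated above, after reducing theta into [0, 2*pi):
#     sin(theta) = sum_{r=0}^{accuracy-1} (-1)**r * theta**(2r+1) / (2r+1)!
# The range reduction keeps the power terms small so the sum converges quickly;
# maclaurin_cos below uses the even-power analogue, theta**(2r) / (2r)!.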
def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with its Maclaurin series, using `accuracy` terms."""
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 28
| 1
|
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3
    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 28
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = "facebook/nllb-200-distilled-600M"
    description = (
        "This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
        "be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
        "which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in "
        "plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
    )
    name = "translator"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["text", "text", "text"]
    outputs = ["text"]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
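# Usage sketch (downloads the NLLB checkpoint on first call; the translated
# text depends on the model version):
#     translator = TranslationTool()
#     translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")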
| 28
| 1
|
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that warns at call time that the wrapped API is experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
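# Usage sketch (the UserWarning category above is an assumption; the original
# argument was lost in this copy):
#     @experimental
#     def new_api():
#         ...
#     new_api()  # warns: 'new_api' is experimental and might be subject to ...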
| 28
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id)

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
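
# Example invocation -- illustrative only, assuming this file is saved as
# convert_bit_to_pytorch.py and timm is installed; the checkpoint name is the
# script's default:
#
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub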
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
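
# What the lazy pattern above buys -- a minimal sketch, assuming this package is
# importable as transformers.models.jukebox: importing the package only loads
# this stub; the heavy submodules are imported on first attribute access.
#
#   from transformers.models import jukebox   # cheap: no torch code runs yet
#   config = jukebox.JukeboxConfig()          # triggers the real import lazily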
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
def __init__( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple=13 , UpperCAmelCase : Any=64 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : Any=3 , UpperCAmelCase : Any=True , UpperCAmelCase : str=True , UpperCAmelCase : str=32 , UpperCAmelCase : str=5 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : str=10 , UpperCAmelCase : Optional[Any]=0.02 , UpperCAmelCase : Optional[Any]=[1, 16, 4, 4] , UpperCAmelCase : Union[str, Any]=None , ):
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : str = batch_size
lowerCAmelCase_ : int = image_size
lowerCAmelCase_ : Tuple = patch_size
lowerCAmelCase_ : Union[str, Any] = num_channels
lowerCAmelCase_ : List[str] = is_training
lowerCAmelCase_ : List[str] = use_labels
lowerCAmelCase_ : str = hidden_size
lowerCAmelCase_ : Union[str, Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Dict = hidden_dropout_prob
lowerCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : int = scope
lowerCAmelCase_ : Tuple = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase_ : int = (self.image_size // 32) ** 2
lowerCAmelCase_ : Dict = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__snake_case : List[str] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__snake_case : Dict = (
{"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__snake_case : int = False
__snake_case : Tuple = False
__snake_case : Tuple = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
def A ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def A ( self : Dict ):
pass
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A ( self : List[str] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[str] = [*signature.parameters.keys()]
lowerCAmelCase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Union[str, Any] = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Any = model_class(config=UpperCAmelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def A ( self : int ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Union[str, Any] = ViTHybridModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@slow
@require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
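
# Note for running the integration checks above (illustrative): slow tests are
# skipped by default and need the RUN_SLOW flag, e.g.
#
#   RUN_SLOW=1 pytest tests/models/vit_hybrid -k "Integration"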
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    '''simple docstring'''
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    '''simple docstring'''
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
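
def _demo_roundtrip():
    # Illustrative round-trip check, not part of the original module: the pure
    # Python codec above should agree with the standard library's base64.
    import base64

    sample = b"Hello, World!"
    encoded = base64_encode(sample)
    assert encoded == base64.b64encode(sample)
    assert base64_decode(encoded) == sample
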
if __name__ == "__main__":
import doctest
doctest.testmod()
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
def prime_sieve_eratosthenes(num: int) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
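
def _demo_sieve():
    # Quick sanity check, not in the original file: the primes up to 30.
    assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
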
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
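
# Illustrative usage sketch, not part of this module; the checkpoint name is an
# assumption picked for the example:
#
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")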
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
def __init__( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : Dict=64 , UpperCAmelCase : int=3 , UpperCAmelCase : Optional[int]=[16, 48, 96] , UpperCAmelCase : Union[str, Any]=[1, 3, 6] , UpperCAmelCase : Optional[Any]=[1, 2, 10] , UpperCAmelCase : str=[7, 3, 3] , UpperCAmelCase : Optional[Any]=[4, 2, 2] , UpperCAmelCase : Any=[2, 1, 1] , UpperCAmelCase : List[Any]=[2, 2, 2] , UpperCAmelCase : List[str]=[False, False, True] , UpperCAmelCase : Dict=[0.0, 0.0, 0.0] , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : List[str]=1e-1_2 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=2 , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Optional[int] = batch_size
lowerCAmelCase_ : Any = image_size
lowerCAmelCase_ : Union[str, Any] = patch_sizes
lowerCAmelCase_ : Any = patch_stride
lowerCAmelCase_ : str = patch_padding
lowerCAmelCase_ : str = is_training
lowerCAmelCase_ : Tuple = use_labels
lowerCAmelCase_ : Tuple = num_labels
lowerCAmelCase_ : Optional[Any] = num_channels
lowerCAmelCase_ : Optional[Any] = embed_dim
lowerCAmelCase_ : int = num_heads
lowerCAmelCase_ : str = stride_kv
lowerCAmelCase_ : Union[str, Any] = depth
lowerCAmelCase_ : Optional[Any] = cls_token
lowerCAmelCase_ : List[Any] = attention_drop_rate
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : List[str] = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
__snake_case : Optional[int] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__snake_case : Any = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
__snake_case : List[Any] = False
__snake_case : Dict = False
__snake_case : Optional[Any] = False
__snake_case : Optional[Any] = False
__snake_case : Optional[int] = False
    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def A ( self : Tuple ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def A ( self : Optional[Any] ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def A ( self : List[str] ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def A ( self : List[Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def A ( self : int ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def A ( self : Any ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def A ( self : List[str] ):
lowerCAmelCase_ : List[str] = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(UpperCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def A ( self : Dict ):
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(UpperCAmelCase )
lowerCAmelCase_ : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : str = [*signature.parameters.keys()]
lowerCAmelCase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : Union[str, Any] ):
def check_hidden_states_output(UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : Tuple = model_class(UpperCAmelCase )
lowerCAmelCase_ : Tuple = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCAmelCase_ : List[str] = outputs.hidden_states
lowerCAmelCase_ : int = len(self.model_tester.depth )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Dict = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : List[str] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : List[Any] ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : int ):
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def A ( self : List[str] ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Tuple = TFCvtModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class __a ( unittest.TestCase ):
@cached_property
    def default_image_processor(self):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
from math import ceil
def solution(n: int = 1001) -> int:
    '''simple docstring'''
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
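
def _demo_solution():
    # Worked example, not in the original file: for a 5x5 spiral the diagonals
    # hold 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101.
    assert solution(5) == 101
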
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
        n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None):
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
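
def _demo_queue():
    # Illustrative usage, not part of the original file: puts land on one stack
    # and gets drain through the other, so FIFO order is preserved.
    queue = QueueByTwoStacks([10, 20])
    queue.put(30)
    assert queue.get() == 10
    assert queue.get() == 20
    assert queue.get() == 30
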
if __name__ == "__main__":
from doctest import testmod
testmod()
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger('transformers.models.speecht5')
def load_weights(checkpoint, hf_model, config):
    '''simple docstring'''
    hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]
    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]
    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]
    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]
    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    '''simple docstring'''
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()
    model = SpeechT5HifiGan(config)
    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
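
# Example invocation -- illustrative only; the paths are placeholders and assume
# the original HiFi-GAN generator checkpoint plus its stats.npy are local:
#
#   python convert_hifigan.py --checkpoint_path generator.ckpt \
#       --stats_path stats.npy --pytorch_dump_folder_path ./speecht5_hifigan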
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : List[Any]=14 , UpperCAmelCase : str=7 , UpperCAmelCase : str=True , UpperCAmelCase : int=True , UpperCAmelCase : List[Any]=False , UpperCAmelCase : Any=True , UpperCAmelCase : Any=99 , UpperCAmelCase : Any=32 , UpperCAmelCase : Any=4 , UpperCAmelCase : int=4 , UpperCAmelCase : str=4 , UpperCAmelCase : Tuple=37 , UpperCAmelCase : Dict="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Optional[Any]=5_12 , UpperCAmelCase : List[str]=0.02 , ):
lowerCAmelCase_ : List[Any] = parent
lowerCAmelCase_ : Union[str, Any] = batch_size
lowerCAmelCase_ : Dict = seq_length
lowerCAmelCase_ : Optional[Any] = is_training
lowerCAmelCase_ : Optional[int] = use_input_mask
lowerCAmelCase_ : Optional[Any] = use_token_type_ids
lowerCAmelCase_ : Optional[Any] = use_labels
lowerCAmelCase_ : Any = vocab_size
lowerCAmelCase_ : Tuple = hidden_size
lowerCAmelCase_ : Any = rotary_dim
lowerCAmelCase_ : str = num_hidden_layers
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : Any = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = initializer_range
lowerCAmelCase_ : int = None
lowerCAmelCase_ : Union[str, Any] = vocab_size - 1
lowerCAmelCase_ : str = vocab_size - 1
lowerCAmelCase_ : Optional[int] = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
def A ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : Tuple ):
lowerCAmelCase_ : str = 20
lowerCAmelCase_ : Dict = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowerCAmelCase_ : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
lowerCAmelCase_ : Tuple = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase_ : Dict = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : List[str] = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Any = model(UpperCAmelCase )
lowerCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
def A ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Any ):
lowerCAmelCase_ : int = 20
lowerCAmelCase_ : List[Any] = model_class_name(UpperCAmelCase )
lowerCAmelCase_ : Tuple = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCAmelCase_ : Optional[int] = model.init_cache(input_ids.shape[0] , UpperCAmelCase )
lowerCAmelCase_ : Dict = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase_ : Tuple = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase , past_key_values=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" )
lowerCAmelCase_ : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase , position_ids=UpperCAmelCase , )
lowerCAmelCase_ : Union[str, Any] = model(UpperCAmelCase , attention_mask=UpperCAmelCase )
lowerCAmelCase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'Max diff is {diff}' )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
__snake_case : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
__snake_case : Any = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)
def A ( self : Union[str, Any] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A ( self : Tuple ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
@is_pt_flax_cross_test
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase_ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase_ : List[str] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase_ : Dict = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = pt_inputs["""input_ids"""].shape
lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : Any = 1
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : List[Any] = 1
lowerCAmelCase_ : Tuple = pt_model_class(UpperCAmelCase ).eval()
                lowerCAmelCase_ : List[str] = model_class(UpperCAmelCase , dtype=jnp.float32 )
lowerCAmelCase_ : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase )
lowerCAmelCase_ : List[str] = fx_state
with torch.no_grad():
lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCAmelCase_ : int = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = model_class.from_pretrained(UpperCAmelCase , from_pt=UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = fx_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def A ( self : Optional[Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase_ : str = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : int = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase_ : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase_ : Any = getattr(UpperCAmelCase , UpperCAmelCase )
lowerCAmelCase_ : str = pt_model_class(UpperCAmelCase ).eval()
                lowerCAmelCase_ : Any = model_class(UpperCAmelCase , dtype=jnp.float32 )
lowerCAmelCase_ : Union[str, Any] = load_flax_weights_in_pytorch_model(UpperCAmelCase , fx_model.params )
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = pt_inputs["""input_ids"""].shape
lowerCAmelCase_ : str = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase ):
lowerCAmelCase_ : Any = 0
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : str = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCAmelCase_ : List[str] = pt_model(**UpperCAmelCase ).to_tuple()
lowerCAmelCase_ : Tuple = fx_model(**UpperCAmelCase ).to_tuple()
self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = pt_model_class.from_pretrained(UpperCAmelCase , from_flax=UpperCAmelCase )
with torch.no_grad():
lowerCAmelCase_ : Dict = pt_model_loaded(**UpperCAmelCase ).to_tuple()
self.assertEqual(
len(UpperCAmelCase ) , len(UpperCAmelCase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(UpperCAmelCase , UpperCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def A ( self : str ):
for model_class_name in self.all_model_classes:
lowerCAmelCase_ : Optional[Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" )
lowerCAmelCase_ : Optional[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase )
def is_unique(input_str: str) -> bool:
    '''simple docstring'''
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
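
def _demo_is_unique():
    # Illustrative only: each set bit in the bitmap marks a code point already
    # seen, so a repeated character trips the check.
    assert is_unique("abcdef") is True
    assert is_unique("hello") is False
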
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
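
def _demo_resistors():
    # Worked example, not in the original file: 4 ohm and 12 ohm resistors give
    # 4*12/(4+12) = 3 ohm in parallel and 16 ohm in series.
    assert abs(resistor_parallel([4, 12]) - 3.0) < 1e-9
    assert resistor_series([4, 12]) == 16.0
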
if __name__ == "__main__":
import doctest
doctest.testmod()
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 5_12,
'google/electra-base-generator': 5_12,
'google/electra-large-generator': 5_12,
'google/electra-small-discriminator': 5_12,
'google/electra-base-discriminator': 5_12,
'google/electra-large-discriminator': 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
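
# Worked example, illustrative only: for a pair of sequences the two helpers
# above produce the layout [CLS] A [SEP] B [SEP] with segment ids 0s then 1s.
# With token_ids_0=[7, 8] and token_ids_1=[9]:
#
#   build_inputs_with_special_tokens       -> [cls, 7, 8, sep, 9, sep]
#   create_token_type_ids_from_sequences   -> [0, 0, 0, 0, 1, 1]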
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
'tokenization_deberta': ['DebertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'DebertaForMaskedLM',
'DebertaForQuestionAnswering',
'DebertaForSequenceClassification',
'DebertaForTokenClassification',
'DebertaModel',
'DebertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDebertaForMaskedLM',
'TFDebertaForQuestionAnswering',
'TFDebertaForSequenceClassification',
'TFDebertaForTokenClassification',
'TFDebertaModel',
'TFDebertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from datetime import datetime as dt
import os
from github import Github
__UpperCAmelCase = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def __UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = Github(os.environ["""GITHUB_TOKEN"""] )
lowerCAmelCase_ : Tuple = g.get_repo("""huggingface/transformers""" )
lowerCAmelCase_ : Any = repo.get_issues(state="""open""" )
for issue in open_issues:
        lowerCAmelCase_ : Union[str, Any] = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
lowerCAmelCase_ : str = comments[0] if len(lowercase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
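
# A worked check of the two staleness windows above, on plain datetimes
# (values are illustrative; real runs read them from the GitHub API, and the
# close path additionally requires the last comment to come from the bot).
from datetime import datetime, timedelta

now = datetime.utcnow()
updated_at = now - timedelta(days=8)
created_at = now - timedelta(days=45)

close_window = (now - updated_at).days > 7 and (now - created_at).days >= 30
stale_window = (now - updated_at).days > 23 and (now - created_at).days >= 30
print(close_window, stale_window)  # True False: 8 idle days hits only the close path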
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __a ( __UpperCamelCase ):
__snake_case : str = DistilBertTokenizer
__snake_case : Dict = DistilBertTokenizerFast
__snake_case : Tuple = True
@slow
def A ( self : List[str] ):
lowerCAmelCase_ : Tuple = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""" )
lowerCAmelCase_ : Optional[int] = tokenizer.encode("""sequence builders""" , add_special_tokens=UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=UpperCAmelCase )
lowerCAmelCase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase , UpperCAmelCase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __a ( unittest.TestCase ):
def A ( self : List[Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCAmelCase_ : Optional[Any] = Vector()
def A ( self : List[str] ):
lowerCAmelCase_ : Tuple = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCAmelCase ) , """(0,0,0,0,0,1)""" )
def A ( self : Any ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCAmelCase ) , 4 )
def A ( self : Dict ):
lowerCAmelCase_ : Dict = Vector([1, 2] )
lowerCAmelCase_ : str = Vector([1, 2, 3, 4, 5] )
lowerCAmelCase_ : Optional[int] = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : Dict = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[int] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Dict = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Dict = Vector([1, 2, 3] )
lowerCAmelCase_ : Optional[int] = Vector([2, -1, 4] ) # for test of dot product
lowerCAmelCase_ : str = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def A ( self : List[str] ):
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def A ( self : Tuple ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def A ( self : Optional[Any] ):
lowerCAmelCase_ : Optional[Any] = Vector([1, 2, 3] )
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCAmelCase , UpperCAmelCase ) ) , """(3,4,7)""" )
def A ( self : Optional[int] ):
lowerCAmelCase_ : List[Any] = Vector([1, 0, 0, 0, 0, 0] )
lowerCAmelCase_ : int = x.copy()
self.assertEqual(str(UpperCAmelCase ) , str(UpperCAmelCase ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Union[str, Any] = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCAmelCase ) , """(0,1,0)""" )
def A ( self : Any ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : List[str] = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Tuple ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Union[str, Any] = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase , UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def A ( self : Optional[int] ):
lowerCAmelCase_ : Dict = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCAmelCase_ : Any = Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def A ( self : Tuple ):
lowerCAmelCase_ : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase ) )
def A ( self : Optional[int] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
        self.assertAlmostEqual(7 , a.component(2 , 1 ) , delta=0.01 )
def A ( self : Dict ):
lowerCAmelCase_ : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCAmelCase_ : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def A ( self : Optional[int] ):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
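
# Quick worked check of the determinant asserted above, by cofactor expansion
# along the first row of [[1, 2, 3], [2, 4, 5], [6, 7, 8]].
det = (
    1 * (4 * 8 - 5 * 7)    # 1 * (32 - 35) = -3
    - 2 * (2 * 8 - 5 * 6)  # -2 * (16 - 30) = +28
    + 3 * (2 * 7 - 4 * 6)  # 3 * (14 - 24) = -30
)
assert det == -5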
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __UpperCamelCase ( lowercase__ : List[str] ) -> Optional[int]:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __UpperCamelCase ( lowercase__ : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase_ : str = create_tensor(lowercase__ )
lowerCAmelCase_ : int = gather(lowercase__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __UpperCamelCase ( lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = [state.process_index]
lowerCAmelCase_ : Optional[int] = gather_object(lowercase__ )
assert len(lowercase__ ) == state.num_processes, f'{gathered_obj}, {len(lowercase__ )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), f'{gathered_obj} != {list(range(state.num_processes ) )}'
def __UpperCamelCase ( lowercase__ : int ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : int = create_tensor(lowercase__ )
lowerCAmelCase_ : Any = broadcast(lowercase__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __UpperCamelCase ( lowercase__ : Tuple ) -> List[str]:
'''simple docstring'''
if state.is_main_process:
lowerCAmelCase_ : Dict = torch.arange(state.num_processes + 1 ).to(state.device )
else:
lowerCAmelCase_ : int = torch.arange(state.num_processes ).to(state.device )
lowerCAmelCase_ : Optional[Any] = pad_across_processes(lowercase__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __UpperCamelCase ( lowercase__ : Tuple ) -> Tuple:
'''simple docstring'''
if state.num_processes != 2:
return
lowerCAmelCase_ : Union[str, Any] = create_tensor(lowercase__ )
lowerCAmelCase_ : Optional[int] = reduce(lowercase__ , """sum""" )
lowerCAmelCase_ : int = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'{reduced_tensor} != {truth_tensor}'
def __UpperCamelCase ( lowercase__ : int ) -> Optional[int]:
'''simple docstring'''
if state.num_processes != 2:
return
lowerCAmelCase_ : Tuple = create_tensor(lowercase__ )
lowerCAmelCase_ : str = reduce(lowercase__ , """mean""" )
lowerCAmelCase_ : List[str] = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(lowercase__ , lowercase__ ), f'{reduced_tensor} != {truth_tensor}'
def __UpperCamelCase ( lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
main()
def __UpperCamelCase ( ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ : Tuple = PartialState()
state.print(f'State: {state}' )
state.print("""testing gather""" )
test_gather(lowercase__ )
state.print("""testing gather_object""" )
test_gather_object(lowercase__ )
state.print("""testing broadcast""" )
test_broadcast(lowercase__ )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(lowercase__ )
state.print("""testing reduce_sum""" )
test_reduce_sum(lowercase__ )
state.print("""testing reduce_mean""" )
test_reduce_mean(lowercase__ )
if __name__ == "__main__":
main()
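
# A pure-Python check of what create_tensor/gather above should produce:
# rank r contributes [r*n + 1, ..., r*n + n], so gathering n ranks yields 1..n**2.
num_processes = 2
per_rank = [[rank * num_processes + i + 1 for i in range(num_processes)] for rank in range(num_processes)]
gathered = [value for chunk in per_rank for value in chunk]
assert gathered == list(range(1, num_processes**2 + 1))  # [1, 2, 3, 4]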
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class __a ( __UpperCamelCase ,__UpperCamelCase ):
__snake_case : Union[str, Any] = """pixel_values"""
__snake_case : Optional[Any] = False
__snake_case : Dict = TimmBackboneConfig
def __init__( self : List[str] , UpperCAmelCase : int , **UpperCAmelCase : List[str] ):
requires_backends(self , """timm""" )
super().__init__(UpperCAmelCase )
lowerCAmelCase_ : List[Any] = config
if config.backbone is None:
raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" )
if config.backbone not in timm.list_models():
raise ValueError(F'backbone {config.backbone} is not supported by timm.' )
if hasattr(UpperCAmelCase , """out_features""" ) and config.out_features is not None:
raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" )
lowerCAmelCase_ : List[str] = getattr(UpperCAmelCase , """use_pretrained_backbone""" , UpperCAmelCase )
if pretrained is None:
raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" )
# We just take the final layer by default. This matches the default for the transformers models.
lowerCAmelCase_ : str = config.out_indices if getattr(UpperCAmelCase , """out_indices""" , UpperCAmelCase ) is not None else (-1,)
lowerCAmelCase_ : Optional[int] = timm.create_model(
config.backbone , pretrained=UpperCAmelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCAmelCase , **UpperCAmelCase , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
lowerCAmelCase_ : Union[str, Any] = self._backbone.return_layers
lowerCAmelCase_ : Dict = {layer["""module"""]: str(UpperCAmelCase ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(UpperCAmelCase )
@classmethod
def A ( cls : Dict , UpperCAmelCase : Union[str, Any] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : Dict ):
requires_backends(cls , ["""vision""", """timm"""] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""config""" , TimmBackboneConfig() )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""use_timm_backbone""" , UpperCAmelCase )
if not use_timm:
raise ValueError("""use_timm_backbone must be True for timm backbones""" )
lowerCAmelCase_ : Union[str, Any] = kwargs.pop("""num_channels""" , config.num_channels )
lowerCAmelCase_ : Tuple = kwargs.pop("""features_only""" , config.features_only )
lowerCAmelCase_ : List[str] = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone )
lowerCAmelCase_ : Optional[Any] = kwargs.pop("""out_indices""" , config.out_indices )
lowerCAmelCase_ : Optional[Any] = TimmBackboneConfig(
backbone=UpperCAmelCase , num_channels=UpperCAmelCase , features_only=UpperCAmelCase , use_pretrained_backbone=UpperCAmelCase , out_indices=UpperCAmelCase , )
return super()._from_config(UpperCAmelCase , **UpperCAmelCase )
def A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ):
pass
def A ( self : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : int=None , **UpperCAmelCase : Any ):
lowerCAmelCase_ : int = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase_ : Any = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("""Cannot output attentions for timm backbones at the moment""" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
lowerCAmelCase_ : Optional[Any] = self._all_layers
lowerCAmelCase_ : List[Any] = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : str = self._return_layers
lowerCAmelCase_ : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
lowerCAmelCase_ : Tuple = self._backbone(UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : List[str] = tuple(UpperCAmelCase )
lowerCAmelCase_ : int = tuple(UpperCAmelCase ) if hidden_states is not None else None
if not return_dict:
lowerCAmelCase_ : Optional[Any] = (feature_maps,)
if output_hidden_states:
lowerCAmelCase_ : Tuple = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=UpperCAmelCase , hidden_states=UpperCAmelCase , attentions=UpperCAmelCase )
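
# A hedged usage sketch for the backbone above, kept commented out because it
# needs torch and timm installed; "resnet18" is an assumed timm model name.
#
# import torch
# from transformers import TimmBackbone, TimmBackboneConfig
#
# config = TimmBackboneConfig(backbone="resnet18", out_indices=(1, 2, 3, 4))
# backbone = TimmBackbone(config)
# feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
# for fmap in feature_maps:
#     print(fmap.shape)  # one (batch, channels, height, width) tensor per stage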
import argparse
import os
import re
import packaging.version
__UpperCAmelCase = 'examples/'
__UpperCAmelCase = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
__UpperCAmelCase = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
__UpperCAmelCase = 'README.md'
def __UpperCamelCase ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCAmelCase_ : Tuple = f.read()
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = REPLACE_PATTERNS[pattern]
lowerCAmelCase_ : Dict = replace.replace("""VERSION""" , lowercase__ )
lowerCAmelCase_ : Any = re_pattern.sub(lowercase__ , lowercase__ )
with open(lowercase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(lowercase__ )
def __UpperCamelCase ( lowercase__ : Any ) -> Union[str, Any]:
'''simple docstring'''
for folder, directories, fnames in os.walk(lowercase__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(lowercase__ , lowercase__ ) , lowercase__ , pattern="""examples""" )
def __UpperCamelCase ( lowercase__ : Optional[int] , lowercase__ : List[Any]=False ) -> Optional[Any]:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowercase__ , lowercase__ , lowercase__ )
if not patch:
update_version_in_examples(lowercase__ )
def __UpperCamelCase ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : int = """🤗 Transformers currently provides the following architectures"""
lowerCAmelCase_ : List[Any] = """1. Want to contribute a new model?"""
with open(lowercase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
lowerCAmelCase_ : str = f.readlines()
# Find the start of the list.
lowerCAmelCase_ : Tuple = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCAmelCase_ : str = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
lowerCAmelCase_ : List[str] = lines[index].replace(
"""https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , )
index += 1
with open(lowercase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(lowercase__ )
def __UpperCamelCase ( ) -> List[str]:
'''simple docstring'''
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
lowerCAmelCase_ : int = f.read()
lowerCAmelCase_ : List[Any] = REPLACE_PATTERNS["""init"""][0].search(lowercase__ ).groups()[0]
return packaging.version.parse(lowercase__ )
def __UpperCamelCase ( lowercase__ : Optional[int]=False ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
lowerCAmelCase_ : Optional[Any] = default_version.base_version
elif patch:
lowerCAmelCase_ : Optional[int] = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
lowerCAmelCase_ : Union[str, Any] = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
lowerCAmelCase_ : Union[str, Any] = input(f'Which version are you releasing? [{default_version}]' )
if len(lowercase__ ) == 0:
lowerCAmelCase_ : int = default_version
print(f'Updating version to {version}.' )
global_version_update(lowercase__ , patch=lowercase__ )
if not patch:
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
def __UpperCamelCase ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ : Dict = get_version()
lowerCAmelCase_ : Dict = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
lowerCAmelCase_ : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
lowerCAmelCase_ : str = input(f'Which version are we developing now? [{dev_version}]' )
if len(lowercase__ ) == 0:
lowerCAmelCase_ : Dict = dev_version
print(f'Updating version to {version}.' )
global_version_update(lowercase__ )
print("""Cleaning main README, don't forget to run `make fix-copies`.""" )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
__UpperCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
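
# A small worked example of the bump logic above, applied to an illustrative
# dev version string with packaging.version.
import packaging.version

v = packaging.version.parse("4.31.0.dev0")
assert v.is_devrelease and v.base_version == "4.31.0"
patch_bump = f"{v.major}.{v.minor}.{v.micro + 1}"  # "4.31.1"
minor_bump = f"{v.major}.{v.minor + 1}.0"          # "4.32.0"
print(patch_bump, minor_bump)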
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class __a ( __UpperCamelCase ):
__snake_case : Optional[Any] = """mra"""
def __init__( self : List[str] , UpperCAmelCase : Tuple=5_02_65 , UpperCAmelCase : str=7_68 , UpperCAmelCase : int=12 , UpperCAmelCase : Dict=12 , UpperCAmelCase : Tuple=30_72 , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=0.1 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : List[str]=5_12 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : int=1e-5 , UpperCAmelCase : Optional[int]="absolute" , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Any="full" , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : List[str]=0 , UpperCAmelCase : Any=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , **UpperCAmelCase : Tuple , ):
super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
lowerCAmelCase_ : Union[str, Any] = vocab_size
lowerCAmelCase_ : Optional[int] = max_position_embeddings
lowerCAmelCase_ : Any = hidden_size
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Tuple = num_attention_heads
lowerCAmelCase_ : List[Any] = intermediate_size
lowerCAmelCase_ : Dict = hidden_act
lowerCAmelCase_ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = attention_probs_dropout_prob
lowerCAmelCase_ : str = initializer_range
lowerCAmelCase_ : str = type_vocab_size
lowerCAmelCase_ : str = layer_norm_eps
lowerCAmelCase_ : Optional[int] = position_embedding_type
lowerCAmelCase_ : Any = block_per_row
lowerCAmelCase_ : int = approx_mode
lowerCAmelCase_ : Union[str, Any] = initial_prior_first_n_blocks
lowerCAmelCase_ : Dict = initial_prior_diagonal_n_blocks
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class __a ( __UpperCamelCase ):
def A ( self : List[str] ):
lowerCAmelCase_ : Union[str, Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(UpperCAmelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(UpperCAmelCase , """num_attention_heads""" ) )
class __a :
def __init__( self : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=13 , UpperCAmelCase : str=64 , UpperCAmelCase : Any=3 , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : Union[str, Any]=16 , UpperCAmelCase : Dict=[1_28, 2_56, 3_84] , UpperCAmelCase : Optional[int]=[4, 6, 8] , UpperCAmelCase : Dict=[2, 3, 4] , UpperCAmelCase : Optional[Any]=[16, 16, 16] , UpperCAmelCase : Tuple=0 , UpperCAmelCase : str=[2, 2, 2] , UpperCAmelCase : Optional[Any]=[2, 2, 2] , UpperCAmelCase : Tuple=0.02 , UpperCAmelCase : Any=True , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : Any=2 , ):
lowerCAmelCase_ : List[str] = parent
lowerCAmelCase_ : Any = batch_size
lowerCAmelCase_ : Tuple = image_size
lowerCAmelCase_ : str = num_channels
lowerCAmelCase_ : Optional[int] = kernel_size
lowerCAmelCase_ : Optional[int] = stride
lowerCAmelCase_ : Tuple = padding
lowerCAmelCase_ : Union[str, Any] = hidden_sizes
lowerCAmelCase_ : int = num_attention_heads
lowerCAmelCase_ : int = depths
lowerCAmelCase_ : Any = key_dim
lowerCAmelCase_ : Dict = drop_path_rate
lowerCAmelCase_ : Optional[int] = patch_size
lowerCAmelCase_ : int = attention_ratio
lowerCAmelCase_ : List[Any] = mlp_ratio
lowerCAmelCase_ : int = initializer_range
lowerCAmelCase_ : Optional[Any] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowerCAmelCase_ : Optional[Any] = is_training
lowerCAmelCase_ : Union[str, Any] = use_labels
lowerCAmelCase_ : Union[str, Any] = num_labels
lowerCAmelCase_ : List[Any] = initializer_range
def A ( self : Union[str, Any] ):
lowerCAmelCase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def A ( self : Any ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def A ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : str ):
lowerCAmelCase_ : Tuple = LevitModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(UpperCAmelCase )
lowerCAmelCase_ : Dict = (self.image_size, self.image_size)
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = image_size[0], image_size[1]
for _ in range(4 ):
lowerCAmelCase_ : Union[str, Any] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowerCAmelCase_ : str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def A ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Any ):
lowerCAmelCase_ : List[Any] = self.num_labels
lowerCAmelCase_ : int = LevitForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase_ : str = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : int ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = config_and_inputs
lowerCAmelCase_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
__snake_case : List[str] = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__snake_case : List[str] = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__snake_case : Dict = False
__snake_case : List[Any] = False
__snake_case : Optional[Any] = False
__snake_case : Optional[Any] = False
__snake_case : Union[str, Any] = False
def A ( self : Dict ):
lowerCAmelCase_ : Tuple = LevitModelTester(self )
lowerCAmelCase_ : Tuple = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def A ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : Tuple ):
return
@unittest.skip(reason="""Levit does not use inputs_embeds""" )
def A ( self : List[str] ):
pass
@unittest.skip(reason="""Levit does not support input and output embeddings""" )
def A ( self : Optional[Any] ):
pass
@unittest.skip(reason="""Levit does not output attentions""" )
def A ( self : Dict ):
pass
def A ( self : Union[str, Any] ):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : List[Any] = model_class(UpperCAmelCase )
lowerCAmelCase_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : int = [*signature.parameters.keys()]
lowerCAmelCase_ : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A ( self : List[str] ):
def check_hidden_states_output(UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str ):
lowerCAmelCase_ : Dict = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase_ : Optional[int] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCAmelCase_ : Optional[int] = outputs.hidden_states
lowerCAmelCase_ : int = len(self.model_tester.depths ) + 1
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowerCAmelCase_ : List[str] = (self.model_tester.image_size, self.model_tester.image_size)
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = image_size[0], image_size[1]
for _ in range(4 ):
lowerCAmelCase_ : List[str] = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowerCAmelCase_ : Optional[Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Optional[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : List[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A ( self : List[Any] ):
pass
def A ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int]=False ):
lowerCAmelCase_ : Any = super()._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A ( self : Dict ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A ( self : Any ):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
def A ( self : str ):
if not self.model_tester.is_training:
return
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[int] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCAmelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowerCAmelCase_ : Optional[int] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
lowerCAmelCase_ : int = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCAmelCase_ : Dict = model(**UpperCAmelCase ).loss
loss.backward()
def A ( self : List[str] ):
lowerCAmelCase_ , lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCAmelCase_ : int = False
lowerCAmelCase_ : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowerCAmelCase_ : Any = model_class(UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(UpperCAmelCase )
model.train()
lowerCAmelCase_ : List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCAmelCase_ : List[str] = model(**UpperCAmelCase ).loss
loss.backward()
def A ( self : Any ):
lowerCAmelCase_ , lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[Any] = [
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCAmelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'Testing {model_class} with {problem_type["title"]}' ):
lowerCAmelCase_ : List[Any] = problem_type["""title"""]
lowerCAmelCase_ : List[str] = problem_type["""num_labels"""]
lowerCAmelCase_ : Optional[Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
lowerCAmelCase_ : Any = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
if problem_type["num_labels"] > 1:
lowerCAmelCase_ : Optional[Any] = inputs["""labels"""].unsqueeze(1 ).repeat(1 , problem_type["""num_labels"""] )
lowerCAmelCase_ : List[str] = inputs["""labels"""].to(problem_type["""dtype"""] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCAmelCase ) as warning_list:
lowerCAmelCase_ : Optional[int] = model(**UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def A ( self : Union[str, Any] ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Dict = LevitModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def __UpperCamelCase ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def A ( self : Any ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def A ( self : Optional[int] ):
lowerCAmelCase_ : List[str] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCAmelCase )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Tuple = prepare_img()
lowerCAmelCase_ : Optional[int] = image_processor(images=UpperCAmelCase , return_tensors="""pt""" ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : str = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase_ : List[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase_ : Optional[Any] = torch.tensor([1.0448, -0.3745, -1.8317] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) )
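
# The spatial-size recurrence behind the Levit shape checks above: with the
# tester defaults (kernel 3, stride 2, padding 1), each of the four patch
# convolutions shrinks a side as floor((size + 2*padding - kernel) / stride) + 1.
from math import floor

def _demo_conv_out(size, kernel=3, stride=2, padding=1):
    return floor((size + 2 * padding - kernel) / stride) + 1

size = 224
for _ in range(4):
    size = _demo_conv_out(size)
print(size)  # 224 -> 112 -> 56 -> 28 -> 14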
from decimal import Decimal, getcontext
from math import ceil, factorial
def __UpperCamelCase ( lowercase__ : int ) -> str:
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
raise TypeError("""Undefined for non-integers""" )
elif precision < 1:
raise ValueError("""Undefined for non-natural numbers""" )
    getcontext().prec = precision
lowerCAmelCase_ : Any = ceil(precision / 14 )
lowerCAmelCase_ : Optional[Any] = 426880 * Decimal(10005 ).sqrt()
lowerCAmelCase_ : Optional[int] = 1
lowerCAmelCase_ : Optional[int] = 13591409
lowerCAmelCase_ : Union[str, Any] = Decimal(lowercase__ )
for k in range(1 , lowercase__ ):
lowerCAmelCase_ : Optional[Any] = factorial(6 * k ) // (factorial(3 * k ) * factorial(lowercase__ ) ** 3)
linear_term += 545140134
exponential_term *= -262537412640768000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__UpperCAmelCase = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __UpperCamelCase ( lowercase__ : Optional[Any] ) -> Any:
'''simple docstring'''
lowerCAmelCase_ : Any = args.pruning_method
lowerCAmelCase_ : List[Any] = args.threshold
lowerCAmelCase_ : Optional[Any] = args.model_name_or_path.rstrip("""/""" )
lowerCAmelCase_ : Any = args.target_model_path
print(f'Load fine-pruned model from {model_name_or_path}' )
lowerCAmelCase_ : Any = torch.load(os.path.join(lowercase__ , """pytorch_model.bin""" ) )
lowerCAmelCase_ : int = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase_ : Dict = tensor
print(f'Copied layer {name}' )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase_ : int = tensor
print(f'Copied layer {name}' )
elif "bias" in name:
lowerCAmelCase_ : Any = tensor
print(f'Copied layer {name}' )
else:
if pruning_method == "magnitude":
lowerCAmelCase_ : Tuple = MagnitudeBinarizer.apply(inputs=lowercase__ , threshold=lowercase__ )
lowerCAmelCase_ : List[Any] = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase_ : int = name[:-6]
lowerCAmelCase_ : Any = model[f'{prefix_}mask_scores']
lowerCAmelCase_ : str = TopKBinarizer.apply(lowercase__ , lowercase__ )
lowerCAmelCase_ : int = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase_ : Tuple = name[:-6]
lowerCAmelCase_ : Any = model[f'{prefix_}mask_scores']
lowerCAmelCase_ : Optional[Any] = ThresholdBinarizer.apply(lowercase__ , lowercase__ , lowercase__ )
lowerCAmelCase_ : str = tensor * mask
print(f'Pruned layer {name}' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase_ : Optional[Any] = name[:-6]
lowerCAmelCase_ : Optional[Any] = model[f'{prefix_}mask_scores']
lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = -0.1, 1.1
lowerCAmelCase_ : Dict = torch.sigmoid(lowercase__ )
lowerCAmelCase_ : Optional[int] = s * (r - l) + l
lowerCAmelCase_ : str = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase_ : str = tensor * mask
print(f'Pruned layer {name}' )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
lowerCAmelCase_ : Optional[int] = os.path.join(
os.path.dirname(lowercase__ ) , f'bertarized_{os.path.basename(lowercase__ )}' )
if not os.path.isdir(lowercase__ ):
shutil.copytree(lowercase__ , lowercase__ )
print(f'\nCreated folder {target_model_path}' )
torch.save(lowercase__ , os.path.join(lowercase__ , """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
        'For `sigmoied_threshold`, it is the threshold \\tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
__UpperCAmelCase = parser.parse_args()
main(args)
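
# A minimal torch sketch of the top-K idea used above: keep the fraction
# `threshold` of entries with the largest |score| and zero out the rest.
# emmental's TopKBinarizer adds a straight-through estimator for training;
# this sketch covers only the forward masking.
import torch

def _demo_topk_mask(scores: torch.Tensor, threshold: float) -> torch.Tensor:
    k = max(1, int(threshold * scores.numel()))
    cutoff = scores.abs().flatten().topk(k).values.min()
    return (scores.abs() >= cutoff).to(scores.dtype)

scores = torch.tensor([[0.9, -0.1], [0.4, -0.7]])
print(_demo_topk_mask(scores, 0.5))  # tensor([[1., 0.], [0., 1.]])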
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __a ( __UpperCamelCase ):
__snake_case : Union[str, Any] = """gptj"""
__snake_case : int = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , UpperCAmelCase : Optional[int]=5_04_00 , UpperCAmelCase : Optional[int]=20_48 , UpperCAmelCase : str=40_96 , UpperCAmelCase : Any=28 , UpperCAmelCase : Dict=16 , UpperCAmelCase : List[str]=64 , UpperCAmelCase : int=None , UpperCAmelCase : Union[str, Any]="gelu_new" , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : Optional[Any]=1e-5 , UpperCAmelCase : List[Any]=0.02 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Dict=5_02_56 , UpperCAmelCase : int=5_02_56 , UpperCAmelCase : Tuple=False , **UpperCAmelCase : Any , ):
lowerCAmelCase_ : Tuple = vocab_size
lowerCAmelCase_ : Union[str, Any] = n_positions
lowerCAmelCase_ : Union[str, Any] = n_embd
lowerCAmelCase_ : List[Any] = n_layer
lowerCAmelCase_ : List[Any] = n_head
lowerCAmelCase_ : Tuple = n_inner
lowerCAmelCase_ : Optional[Any] = rotary_dim
lowerCAmelCase_ : str = activation_function
lowerCAmelCase_ : str = resid_pdrop
lowerCAmelCase_ : List[Any] = embd_pdrop
lowerCAmelCase_ : Dict = attn_pdrop
lowerCAmelCase_ : Any = layer_norm_epsilon
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : Optional[int] = use_cache
lowerCAmelCase_ : Optional[int] = bos_token_id
lowerCAmelCase_ : Any = eos_token_id
super().__init__(
bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , **UpperCAmelCase )
class __a ( __UpperCamelCase ):
def __init__( self : Any , UpperCAmelCase : PretrainedConfig , UpperCAmelCase : str = "default" , UpperCAmelCase : List[PatchingSpec] = None , UpperCAmelCase : bool = False , ):
super().__init__(UpperCAmelCase , task=UpperCAmelCase , patching_specs=UpperCAmelCase , use_past=UpperCAmelCase )
if not getattr(self._config , """pad_token_id""" , UpperCAmelCase ):
# TODO: how to do that better?
lowerCAmelCase_ : List[Any] = 0
@property
def A ( self : List[Any] ):
lowerCAmelCase_ : Optional[int] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase , direction="""inputs""" )
lowerCAmelCase_ : Any = {0: """batch""", 1: """past_sequence + sequence"""}
else:
lowerCAmelCase_ : List[Any] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def A ( self : Union[str, Any] ):
return self._config.n_layer
@property
def A ( self : Optional[Any] ):
return self._config.n_head
def A ( self : Optional[Any] , UpperCAmelCase : PreTrainedTokenizer , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[TensorType] = None , ):
lowerCAmelCase_ : Optional[Any] = super(UpperCAmelCase , self ).generate_dummy_inputs(
UpperCAmelCase , batch_size=UpperCAmelCase , seq_length=UpperCAmelCase , is_pair=UpperCAmelCase , framework=UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowerCAmelCase_ : List[Any] = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
lowerCAmelCase_ , lowerCAmelCase_ : int = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
lowerCAmelCase_ : Optional[Any] = seqlen + 2
lowerCAmelCase_ : Optional[int] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowerCAmelCase_ : Optional[int] = [
(torch.zeros(UpperCAmelCase ), torch.zeros(UpperCAmelCase )) for _ in range(self.num_layers )
]
lowerCAmelCase_ : Dict = common_inputs["""attention_mask"""]
if self.use_past:
lowerCAmelCase_ : Union[str, Any] = ordered_inputs["""attention_mask"""].dtype
lowerCAmelCase_ : str = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(UpperCAmelCase , UpperCAmelCase , dtype=UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def A ( self : Optional[int] ):
return 13
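
# A worked example of the past_key_values shape built above, with illustrative
# GPT-J-like dimensions: batch 2, 16 heads, hidden 4096, sequence length 10.
batch, num_heads, hidden_size, seqlen = 2, 16, 4096, 10
past_key_values_length = seqlen + 2
shape = (batch, num_heads, past_key_values_length, hidden_size // num_heads)
assert shape == (2, 16, 12, 256)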
from collections import defaultdict
def __UpperCamelCase ( lowercase__ : str , lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = first_str.lower().strip()
lowerCAmelCase_ : List[Any] = second_str.lower().strip()
# Remove whitespace
lowerCAmelCase_ : Any = first_str.replace(""" """ , """""" )
lowerCAmelCase_ : Union[str, Any] = second_str.replace(""" """ , """""" )
# Strings of different lengths are not anagrams
if len(lowercase__ ) != len(lowercase__ ):
return False
# Default values for count should be 0
    lowerCAmelCase_ : defaultdict[str, int] = defaultdict(int )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(lowercase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__UpperCAmelCase = input('Enter the first string ').strip()
__UpperCAmelCase = input('Enter the second string ').strip()
__UpperCAmelCase = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __UpperCamelCase ( lowercase__ : list[list[float]] ) -> list[list[float]]:
'''simple docstring'''
lowerCAmelCase_ : Any = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(lowercase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
lowerCAmelCase_ : Any = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creates a copy of the matrix with swapped positions of the elements
lowerCAmelCase_ : Tuple = [[0.0, 0.0], [0.0, 0.0]]
lowerCAmelCase_ , lowerCAmelCase_ : Dict = matrix[1][1], matrix[0][0]
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(lowercase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(lowercase__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
lowerCAmelCase_ : int = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError("""This matrix has no inverse.""" )
# Creating cofactor matrix
lowerCAmelCase_ : Union[str, Any] = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
lowerCAmelCase_ : Any = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
lowerCAmelCase_ : List[str] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
lowerCAmelCase_ : str = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
lowerCAmelCase_ : Tuple = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
lowerCAmelCase_ : Dict = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
lowerCAmelCase_ : Union[str, Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
lowerCAmelCase_ : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
lowerCAmelCase_ : List[Any] = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
lowerCAmelCase_ : List[Any] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
lowerCAmelCase_ : List[Any] = array(lowercase__ )
for i in range(3 ):
for j in range(3 ):
lowerCAmelCase_ : Optional[int] = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
lowerCAmelCase_ : int = array(lowercase__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(lowercase__ )
# Calculate the inverse of the matrix
return [[float(d(lowercase__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(
            ["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True
        )

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                    fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                    self.assertEqual(
                        len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                        self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                    with torch.no_grad():
                        pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                    self.assertEqual(
                        len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                    )
                    for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                        self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
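The batch-generation test above leans on one reusable pattern: wrapping `model.generate` in `jax.jit` so that repeated decodes with identically shaped inputs reuse a single compiled trace. A standalone sketch of that pattern follows (note `EleutherAI/gpt-j-6B` is a very large download; any Flax causal-LM checkpoint slots into the same code):

```python
import jax
from transformers import FlaxGPTJForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
model.config.pad_token_id = model.config.eos_token_id

# Compiled once; later calls with the same input shapes skip retracing.
jit_generate = jax.jit(model.generate)

inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True)
sequences = jit_generate(
    inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))
```

Left padding matters here: with a decoder-only model, right padding would put pad tokens between the prompt and the generated continuation.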
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n (all divisors except n itself)."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Return the sum of all amicable numbers below limit (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
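A quick check against the smallest amicable pair, (220, 284), whose proper divisors sum to each other:

```python
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
# Both members of the pair lie below 300, so they are the whole answer there.
assert solution(300) == 220 + 284
```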
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
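To see how `set_timesteps`, `add_noise_to_input`, and `step` compose, here is a minimal sampling-loop sketch. The denoiser is a stand-in callable (a real pipeline would call a trained UNet, and would also apply `step_correct` for the second-order update, omitted here for brevity):

```python
import torch

scheduler = KarrasVeScheduler()
scheduler.set_timesteps(50)

def denoiser(x, sigma):
    # Stand-in for a trained model; returns a dummy prediction.
    return torch.zeros_like(x)

sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    sigma = scheduler.schedule[t]
    sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
    # Stochastic churn: raise the noise level from sigma to sigma_hat.
    sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
    model_output = denoiser(sample_hat, sigma_hat)
    sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
```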
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
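The `_LazyModule` indirection defers the heavy, torch-dependent submodule until one of its symbols is actually touched. A sketch of what that looks like from the consumer side (assuming a standard transformers install):

```python
# Resolving the config or processor only imports the light submodules;
# modeling_trocr (and therefore torch) is imported lazily on first access
# to TrOCRForCausalLM.
from transformers.models.trocr import TrOCRConfig, TrOCRProcessor

config = TrOCRConfig()
```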
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring so rear.next wraps around to front.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return
        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None
        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
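A short usage sketch of the queue above, using the names from the cleaned-up classes:

```python
queue = CircularQueueLinkedList(initial_capacity=3)
queue.enqueue("a")
queue.enqueue("b")
print(queue.first())    # -> a
print(queue.dequeue())  # -> a
print(queue.dequeue())  # -> b
try:
    queue.dequeue()
except Exception as err:
    print(err)          # -> Empty Queue
```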